index int64 0 0 | repo_id stringclasses 596 values | file_path stringlengths 31 168 | content stringlengths 1 6.2M |
|---|---|---|---|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/embeddings/self_hosted_hugging_face.py | import importlib
import logging
from typing import Any, Callable, List, Optional
from langchain_community.embeddings.self_hosted import SelfHostedEmbeddings
DEFAULT_MODEL_NAME = "sentence-transformers/all-mpnet-base-v2"
DEFAULT_INSTRUCT_MODEL = "hkunlp/instructor-large"
DEFAULT_EMBED_INSTRUCTION = "Represent the document for retrieval: "
DEFAULT_QUERY_INSTRUCTION = (
"Represent the question for retrieving supporting documents: "
)
logger = logging.getLogger(__name__)
def _embed_documents(client: Any, *args: Any, **kwargs: Any) -> List[List[float]]:
"""Inference function to send to the remote hardware.
Accepts a sentence_transformer model_id and
returns a list of embeddings for each document in the batch.
"""
return client.encode(*args, **kwargs)
def load_embedding_model(model_id: str, instruct: bool = False, device: int = 0) -> Any:
    """Load the embedding model.

    Args:
        model_id: Hugging Face model id to load.
        instruct: If True, load an ``InstructorEmbedding.INSTRUCTOR`` model;
            otherwise load a ``sentence_transformers.SentenceTransformer``.
        device: CUDA device id to move the model to; -1 keeps it on CPU.

    Returns:
        The loaded model client, moved to ``device`` when torch is installed
        and the device id is a valid CUDA device.

    Raises:
        ValueError: If ``device`` is outside ``[-1, cuda_device_count)``.
    """
    if not instruct:
        import sentence_transformers

        client = sentence_transformers.SentenceTransformer(model_id)
    else:
        from InstructorEmbedding import INSTRUCTOR

        client = INSTRUCTOR(model_id)
    if importlib.util.find_spec("torch") is not None:
        import torch

        cuda_device_count = torch.cuda.device_count()
        if device < -1 or (device >= cuda_device_count):
            raise ValueError(
                f"Got device=={device}, "
                f"device is required to be within [-1, {cuda_device_count})"
            )
        if device < 0 and cuda_device_count > 0:
            logger.warning(
                "Device has %d GPUs available. "
                # Fix: the two fragments previously concatenated without a
                # space, producing "use availableGPUs".
                "Provide device={deviceId} to `from_model_id` to use available "
                "GPUs for execution. deviceId is -1 for CPU and "
                "can be a positive integer associated with CUDA device id.",
                cuda_device_count,
            )
        # Fix: only move the model for device >= 0. ``client.to(-1)`` raises
        # in torch, so device == -1 (documented as CPU) previously crashed
        # instead of leaving the model on CPU.
        if device >= 0:
            client = client.to(device)
    return client
class SelfHostedHuggingFaceEmbeddings(SelfHostedEmbeddings):
    """HuggingFace embedding models on self-hosted remote hardware.

    Supported hardware includes auto-launched instances on AWS, GCP, Azure,
    and Lambda, as well as servers specified by IP address and SSH
    credentials (such as on-prem, or another cloud like Paperspace,
    Coreweave, etc.).

    To use, you should have the ``runhouse`` python package installed.

    Example:
        .. code-block:: python

            from langchain_community.embeddings import SelfHostedHuggingFaceEmbeddings
            import runhouse as rh
            model_id = "sentence-transformers/all-mpnet-base-v2"
            gpu = rh.cluster(name="rh-a10x", instance_type="A100:1")
            hf = SelfHostedHuggingFaceEmbeddings(model_id=model_id, hardware=gpu)
    """

    client: Any  #: :meta private:
    model_id: str = DEFAULT_MODEL_NAME
    """Model name to use."""
    model_reqs: List[str] = ["./", "sentence_transformers", "torch"]
    """Requirements to install on hardware to inference the model."""
    hardware: Any
    """Remote hardware to send the inference function to."""
    model_load_fn: Callable = load_embedding_model
    """Function to load the model remotely on the server."""
    load_fn_kwargs: Optional[dict] = None
    """Keyword arguments to pass to the model load function."""
    inference_fn: Callable = _embed_documents
    """Inference function to extract the embeddings."""

    def __init__(self, **kwargs: Any):
        """Fill in default model-loading kwargs, then defer to the parent."""
        load_fn_kwargs = kwargs.pop("load_fn_kwargs", {})
        load_fn_kwargs.setdefault("model_id", DEFAULT_MODEL_NAME)
        load_fn_kwargs.setdefault("instruct", False)
        load_fn_kwargs.setdefault("device", 0)
        super().__init__(load_fn_kwargs=load_fn_kwargs, **kwargs)
class SelfHostedHuggingFaceInstructEmbeddings(SelfHostedHuggingFaceEmbeddings):
    """HuggingFace InstructEmbedding models on self-hosted remote hardware.

    Supported hardware includes auto-launched instances on AWS, GCP, Azure,
    and Lambda, as well as servers specified by IP address and SSH
    credentials (such as on-prem, or another cloud like Paperspace,
    Coreweave, etc.).

    To use, you should have the ``runhouse`` python package installed.

    Example:
        .. code-block:: python

            from langchain_community.embeddings import SelfHostedHuggingFaceInstructEmbeddings
            import runhouse as rh
            model_name = "hkunlp/instructor-large"
            gpu = rh.cluster(name='rh-a10x', instance_type='A100:1')
            hf = SelfHostedHuggingFaceInstructEmbeddings(
                model_name=model_name, hardware=gpu)
    """  # noqa: E501

    model_id: str = DEFAULT_INSTRUCT_MODEL
    """Model name to use."""
    embed_instruction: str = DEFAULT_EMBED_INSTRUCTION
    """Instruction to use for embedding documents."""
    query_instruction: str = DEFAULT_QUERY_INSTRUCTION
    """Instruction to use for embedding query."""
    model_reqs: List[str] = ["./", "InstructorEmbedding", "torch"]
    """Requirements to install on hardware to inference the model."""

    def __init__(self, **kwargs: Any):
        """Fill in instructor-specific loading defaults, then defer to parent."""
        load_fn_kwargs = kwargs.pop("load_fn_kwargs", {})
        load_fn_kwargs.setdefault("model_id", DEFAULT_INSTRUCT_MODEL)
        load_fn_kwargs.setdefault("instruct", True)
        load_fn_kwargs.setdefault("device", 0)
        super().__init__(load_fn_kwargs=load_fn_kwargs, **kwargs)

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Compute doc embeddings using a HuggingFace instruct model.

        Args:
            texts: The list of texts to embed.

        Returns:
            List of embeddings, one for each text.
        """
        pairs = [[self.embed_instruction, text] for text in texts]
        embeddings = self.client(self.pipeline_ref, pairs)
        return embeddings.tolist()

    def embed_query(self, text: str) -> List[float]:
        """Compute query embeddings using a HuggingFace instruct model.

        Args:
            text: The text to embed.

        Returns:
            Embeddings for the text.
        """
        result = self.client(self.pipeline_ref, [[self.query_instruction, text]])
        return result[0].tolist()
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/embeddings/johnsnowlabs.py | import os
import sys
from typing import Any, List
from langchain_core.embeddings import Embeddings
from pydantic import BaseModel, ConfigDict
class JohnSnowLabsEmbeddings(BaseModel, Embeddings):
    """JohnSnowLabs embedding models

    To use, you should have the ``johnsnowlabs`` python package installed.
    Example:
        .. code-block:: python
            from langchain_community.embeddings.johnsnowlabs import JohnSnowLabsEmbeddings
            embedding = JohnSnowLabsEmbeddings(model='embed_sentence.bert')
            output = embedding.embed_query("foo bar")
    """  # noqa: E501

    model: Any = "embed_sentence.bert"

    def __init__(
        self,
        model: Any = "embed_sentence.bert",
        hardware_target: str = "cpu",
        **kwargs: Any,
    ):
        """Initialize the johnsnowlabs model.

        Args:
            model: An NLU model reference string, an ``NLUPipeline`` instance,
                or any object convertible via ``nlp.to_nlu_pipe``.
            hardware_target: Hardware profile used to start the Spark session.

        Raises:
            ImportError: If ``johnsnowlabs`` is not installed.
            Exception: If the Spark session or model fails to start/load.
        """
        super().__init__(**kwargs)
        # 1) Check imports
        try:
            from johnsnowlabs import nlp
            from nlu.pipe.pipeline import NLUPipeline
        except ImportError as exc:
            raise ImportError(
                "Could not import johnsnowlabs python package. "
                "Please install it with `pip install johnsnowlabs`."
            ) from exc
        # 2) Start a Spark Session (pin PySpark to the current interpreter so
        # driver and workers agree on the Python version).
        try:
            os.environ["PYSPARK_PYTHON"] = sys.executable
            os.environ["PYSPARK_DRIVER_PYTHON"] = sys.executable
            nlp.start(hardware_target=hardware_target)
        except Exception as exc:
            raise Exception("Failure starting Spark Session") from exc
        # 3) Load the model
        try:
            if isinstance(model, str):
                self.model = nlp.load(model)
            elif isinstance(model, NLUPipeline):
                self.model = model
            else:
                self.model = nlp.to_nlu_pipe(model)
        except Exception as exc:
            raise Exception("Failure loading model") from exc

    model_config = ConfigDict(
        extra="forbid",
    )

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Compute doc embeddings using a JohnSnowLabs transformer model.

        Args:
            texts: The list of texts to embed.

        Returns:
            List of embeddings, one for each text.

        Raises:
            ValueError: If the pipeline output contains no embedding column.
        """
        df = self.model.predict(texts, output_level="document")
        emb_col = None
        for c in df.columns:
            if "embedding" in c:
                # Keep scanning so the *last* matching column wins, preserving
                # the original selection behavior.
                emb_col = c
        if emb_col is None:
            # Fix: previously fell through to ``df[None]`` and failed with a
            # cryptic error when no embedding column was produced.
            raise ValueError(
                "No embedding column found in the model output; "
                "ensure the loaded pipeline produces sentence embeddings."
            )
        return [vec.tolist() for vec in df[emb_col].tolist()]

    def embed_query(self, text: str) -> List[float]:
        """Compute query embeddings using a JohnSnowLabs transformer model.

        Args:
            text: The text to embed.

        Returns:
            Embeddings for the text.
        """
        return self.embed_documents([text])[0]
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/embeddings/llamacpp.py | from typing import Any, List, Optional
from langchain_core.embeddings import Embeddings
from pydantic import BaseModel, ConfigDict, Field, model_validator
from typing_extensions import Self
class LlamaCppEmbeddings(BaseModel, Embeddings):
    """llama.cpp embedding models.

    Requires the llama-cpp-python library; the path to the Llama model file
    is passed as the ``model_path`` named parameter to the constructor.
    Check out: https://github.com/abetlen/llama-cpp-python

    Example:
        .. code-block:: python

            from langchain_community.embeddings import LlamaCppEmbeddings
            llama = LlamaCppEmbeddings(model_path="/path/to/model.bin")
    """

    client: Any = None  #: :meta private:
    model_path: str
    n_ctx: int = Field(512, alias="n_ctx")
    """Token context window."""
    n_parts: int = Field(-1, alias="n_parts")
    """Number of parts to split the model into.
    If -1, the number of parts is automatically determined."""
    seed: int = Field(-1, alias="seed")
    """Seed. If -1, a random seed is used."""
    f16_kv: bool = Field(False, alias="f16_kv")
    """Use half-precision for key/value cache."""
    logits_all: bool = Field(False, alias="logits_all")
    """Return logits for all tokens, not just the last token."""
    vocab_only: bool = Field(False, alias="vocab_only")
    """Only load the vocabulary, no weights."""
    use_mlock: bool = Field(False, alias="use_mlock")
    """Force system to keep model in RAM."""
    n_threads: Optional[int] = Field(None, alias="n_threads")
    """Number of threads to use. If None, the number
    of threads is automatically determined."""
    n_batch: Optional[int] = Field(512, alias="n_batch")
    """Number of tokens to process in parallel.
    Should be a number between 1 and n_ctx."""
    n_gpu_layers: Optional[int] = Field(None, alias="n_gpu_layers")
    """Number of layers to be loaded into gpu memory. Default None."""
    verbose: bool = Field(True, alias="verbose")
    """Print verbose output to stderr."""
    device: Optional[str] = Field(None, alias="device")
    """Device type to use and pass to the model"""

    model_config = ConfigDict(
        extra="forbid",
        protected_namespaces=(),
    )

    @model_validator(mode="after")
    def validate_environment(self) -> Self:
        """Validate that llama-cpp-python library is installed."""
        model_path = self.model_path
        # Gather the constructor kwargs straight from the validated fields.
        model_params = {
            name: getattr(self, name)
            for name in (
                "n_ctx",
                "n_parts",
                "seed",
                "f16_kv",
                "logits_all",
                "vocab_only",
                "use_mlock",
                "n_threads",
                "n_batch",
                "verbose",
                "device",
            )
        }
        # For backwards compatibility, only include if non-null.
        if self.n_gpu_layers is not None:
            model_params["n_gpu_layers"] = self.n_gpu_layers
        try:
            from llama_cpp import Llama

            self.client = Llama(model_path, embedding=True, **model_params)
        except ImportError:
            raise ImportError(
                "Could not import llama-cpp-python library. "
                "Please install the llama-cpp-python library to "
                "use this embedding model: pip install llama-cpp-python"
            )
        except Exception as e:
            raise ValueError(
                f"Could not load Llama model from path: {model_path}. "
                f"Received error {e}"
            )
        return self

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Embed a list of documents using the Llama model.

        Args:
            texts: The list of texts to embed.

        Returns:
            List of embeddings, one for each text.
        """
        response = self.client.create_embedding(texts)
        return [
            [float(value) for value in item["embedding"]]
            for item in response["data"]
        ]

    def embed_query(self, text: str) -> List[float]:
        """Embed a query using the Llama model.

        Args:
            text: The text to embed.

        Returns:
            Embeddings for the text.
        """
        return [float(value) for value in self.client.embed(text)]
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/embeddings/cohere.py | from typing import Any, Dict, List, Optional
from langchain_core._api.deprecation import deprecated
from langchain_core.embeddings import Embeddings
from langchain_core.utils import get_from_dict_or_env
from pydantic import BaseModel, ConfigDict, model_validator
from langchain_community.llms.cohere import _create_retry_decorator
@deprecated(
    since="0.0.30",
    removal="1.0",
    alternative_import="langchain_cohere.CohereEmbeddings",
)
class CohereEmbeddings(BaseModel, Embeddings):
    """Cohere embedding models.

    To use, you should have the ``cohere`` python package installed, and the
    environment variable ``COHERE_API_KEY`` set with your API key or pass it
    as a named parameter to the constructor.

    Example:
        .. code-block:: python

            from langchain_community.embeddings import CohereEmbeddings
            cohere = CohereEmbeddings(
                model="embed-english-light-v3.0",
                cohere_api_key="my-api-key"
            )
    """

    client: Any = None  #: :meta private:
    """Cohere client."""
    async_client: Any = None  #: :meta private:
    """Cohere async client."""
    model: str = "embed-english-v2.0"
    """Model name to use."""
    truncate: Optional[str] = None
    """Truncate embeddings that are too long from start or end ("NONE"|"START"|"END")"""
    cohere_api_key: Optional[str] = None
    max_retries: int = 3
    """Maximum number of retries to make when generating."""
    request_timeout: Optional[float] = None
    """Timeout in seconds for the Cohere API request."""
    user_agent: str = "langchain"
    """Identifier for the application making the request."""

    model_config = ConfigDict(
        extra="forbid",
    )

    @model_validator(mode="before")
    @classmethod
    def validate_environment(cls, values: Dict) -> Any:
        """Validate that api key and python package exists in environment."""
        cohere_api_key = get_from_dict_or_env(
            values, "cohere_api_key", "COHERE_API_KEY"
        )
        request_timeout = values.get("request_timeout")
        try:
            import cohere

            # Fix: this runs in a "before" validator, so field defaults have
            # not been applied yet; ``values["user_agent"]`` raised KeyError
            # whenever the caller did not pass user_agent explicitly.
            client_name = values.get("user_agent", "langchain")
            values["client"] = cohere.Client(
                cohere_api_key,
                timeout=request_timeout,
                client_name=client_name,
            )
            values["async_client"] = cohere.AsyncClient(
                cohere_api_key,
                timeout=request_timeout,
                client_name=client_name,
            )
        except ImportError:
            raise ImportError(
                "Could not import cohere python package. "
                "Please install it with `pip install cohere`."
            )
        return values

    def embed_with_retry(self, **kwargs: Any) -> Any:
        """Use tenacity to retry the embed call."""
        retry_decorator = _create_retry_decorator(self.max_retries)

        @retry_decorator
        def _embed_with_retry(**kwargs: Any) -> Any:
            return self.client.embed(**kwargs)

        return _embed_with_retry(**kwargs)

    def aembed_with_retry(self, **kwargs: Any) -> Any:
        """Use tenacity to retry the embed call.

        Note: this is a sync method returning a coroutine; the caller must
        ``await`` the result (as ``aembed`` does).
        """
        retry_decorator = _create_retry_decorator(self.max_retries)

        @retry_decorator
        async def _embed_with_retry(**kwargs: Any) -> Any:
            return await self.async_client.embed(**kwargs)

        return _embed_with_retry(**kwargs)

    def embed(
        self, texts: List[str], *, input_type: Optional[str] = None
    ) -> List[List[float]]:
        """Embed ``texts`` with the configured model, returning float vectors."""
        embeddings = self.embed_with_retry(
            model=self.model,
            texts=texts,
            input_type=input_type,
            truncate=self.truncate,
        ).embeddings
        return [list(map(float, e)) for e in embeddings]

    async def aembed(
        self, texts: List[str], *, input_type: Optional[str] = None
    ) -> List[List[float]]:
        """Async variant of :meth:`embed`."""
        embeddings = (
            await self.aembed_with_retry(
                model=self.model,
                texts=texts,
                input_type=input_type,
                truncate=self.truncate,
            )
        ).embeddings
        return [list(map(float, e)) for e in embeddings]

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Embed a list of document texts.

        Args:
            texts: The list of texts to embed.

        Returns:
            List of embeddings, one for each text.
        """
        return self.embed(texts, input_type="search_document")

    async def aembed_documents(self, texts: List[str]) -> List[List[float]]:
        """Async call out to Cohere's embedding endpoint.

        Args:
            texts: The list of texts to embed.

        Returns:
            List of embeddings, one for each text.
        """
        return await self.aembed(texts, input_type="search_document")

    def embed_query(self, text: str) -> List[float]:
        """Call out to Cohere's embedding endpoint.

        Args:
            text: The text to embed.

        Returns:
            Embeddings for the text.
        """
        return self.embed([text], input_type="search_query")[0]

    async def aembed_query(self, text: str) -> List[float]:
        """Async call out to Cohere's embedding endpoint.

        Args:
            text: The text to embed.

        Returns:
            Embeddings for the text.
        """
        return (await self.aembed([text], input_type="search_query"))[0]
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/embeddings/google_palm.py | from __future__ import annotations
import logging
from typing import Any, Callable, Dict, List, Optional
from langchain_core.embeddings import Embeddings
from langchain_core.utils import get_from_dict_or_env, pre_init
from pydantic import BaseModel, ConfigDict
from tenacity import (
before_sleep_log,
retry,
retry_if_exception_type,
stop_after_attempt,
wait_exponential,
)
logger = logging.getLogger(__name__)
def _create_retry_decorator() -> Callable[[Any], Any]:
    """Returns a tenacity retry decorator, preconfigured to handle PaLM exceptions"""
    import google.api_core.exceptions

    # Transient errors worth retrying; anything else propagates immediately.
    retryable = (
        google.api_core.exceptions.ResourceExhausted,
        google.api_core.exceptions.ServiceUnavailable,
        google.api_core.exceptions.GoogleAPIError,
    )
    return retry(
        reraise=True,
        stop=stop_after_attempt(10),
        wait=wait_exponential(multiplier=2, min=1, max=60),
        retry=retry_if_exception_type(retryable),
        before_sleep=before_sleep_log(logger, logging.WARNING),
    )
def embed_with_retry(
    embeddings: GooglePalmEmbeddings, *args: Any, **kwargs: Any
) -> Any:
    """Use tenacity to retry the completion call."""

    @_create_retry_decorator()
    def _call(*call_args: Any, **call_kwargs: Any) -> Any:
        return embeddings.client.generate_embeddings(*call_args, **call_kwargs)

    return _call(*args, **kwargs)
class GooglePalmEmbeddings(BaseModel, Embeddings):
    """Google's PaLM Embeddings APIs."""

    client: Any
    google_api_key: Optional[str]
    model_name: str = "models/embedding-gecko-001"
    """Model name to use."""
    show_progress_bar: bool = False
    """Whether to show a tqdm progress bar. Must have `tqdm` installed."""

    model_config = ConfigDict(protected_namespaces=())

    @pre_init
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate api key, python package exists."""
        api_key = get_from_dict_or_env(
            values, "google_api_key", "GOOGLE_API_KEY"
        )
        try:
            import google.generativeai as genai

            genai.configure(api_key=api_key)
        except ImportError:
            raise ImportError("Could not import google.generativeai python package.")
        values["client"] = genai
        return values

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Embed each text, optionally wrapping the loop in a tqdm bar."""
        iterable: Any = texts
        if self.show_progress_bar:
            try:
                from tqdm import tqdm
            except ImportError:
                logger.warning(
                    "Unable to show progress bar because tqdm could not be imported. "
                    "Please install with `pip install tqdm`."
                )
            else:
                iterable = tqdm(texts, desc="GooglePalmEmbeddings")
        return [self.embed_query(text) for text in iterable]

    def embed_query(self, text: str) -> List[float]:
        """Embed query text."""
        result = embed_with_retry(self, self.model_name, text)
        return result["embedding"]
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/embeddings/localai.py | from __future__ import annotations
import logging
import warnings
from typing import (
Any,
Callable,
Dict,
List,
Literal,
Optional,
Sequence,
Set,
Tuple,
Union,
)
from langchain_core.embeddings import Embeddings
from langchain_core.utils import (
get_from_dict_or_env,
get_pydantic_field_names,
pre_init,
)
from pydantic import BaseModel, ConfigDict, Field, model_validator
from tenacity import (
AsyncRetrying,
before_sleep_log,
retry,
retry_if_exception_type,
stop_after_attempt,
wait_exponential,
)
logger = logging.getLogger(__name__)
def _create_retry_decorator(embeddings: LocalAIEmbeddings) -> Callable[[Any], Any]:
    """Build a tenacity retry decorator sized by ``embeddings.max_retries``."""
    import openai

    # Transient OpenAI-client errors that merit a retry.
    retryable = (
        openai.error.Timeout,  # type: ignore[attr-defined]
        openai.error.APIError,  # type: ignore[attr-defined]
        openai.error.APIConnectionError,  # type: ignore[attr-defined]
        openai.error.RateLimitError,  # type: ignore[attr-defined]
        openai.error.ServiceUnavailableError,  # type: ignore[attr-defined]
    )
    # Wait 2^x * 1 second between each retry starting with
    # 4 seconds, then up to 10 seconds, then 10 seconds afterwards
    return retry(
        reraise=True,
        stop=stop_after_attempt(embeddings.max_retries),
        wait=wait_exponential(multiplier=1, min=4, max=10),
        retry=retry_if_exception_type(retryable),
        before_sleep=before_sleep_log(logger, logging.WARNING),
    )
def _async_retry_decorator(embeddings: LocalAIEmbeddings) -> Any:
    """Build an async retry decorator configured from ``embeddings.max_retries``.

    Returns a decorator; applying it to an async function yields a wrapper
    that drives the call through a tenacity ``AsyncRetrying`` loop.
    """
    import openai

    min_seconds = 4
    max_seconds = 10
    # Wait 2^x * 1 second between each retry starting with
    # 4 seconds, then up to 10 seconds, then 10 seconds afterwards
    async_retrying = AsyncRetrying(
        reraise=True,
        stop=stop_after_attempt(embeddings.max_retries),
        wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds),
        retry=(
            retry_if_exception_type(openai.error.Timeout)  # type: ignore[attr-defined]
            | retry_if_exception_type(openai.error.APIError)  # type: ignore[attr-defined]
            | retry_if_exception_type(openai.error.APIConnectionError)  # type: ignore[attr-defined]
            | retry_if_exception_type(openai.error.RateLimitError)  # type: ignore[attr-defined]
            | retry_if_exception_type(openai.error.ServiceUnavailableError)  # type: ignore[attr-defined]
        ),
        before_sleep=before_sleep_log(logger, logging.WARNING),
    )

    def wrap(func: Callable) -> Callable:
        async def wrapped_f(*args: Any, **kwargs: Any) -> Any:
            # One loop pass per yielded retry attempt; the first successful
            # call returns its result directly.
            # NOTE(review): exceptions from func propagate out of the loop
            # rather than through a ``with attempt:`` block — verify against
            # tenacity's AsyncRetrying docs that retries actually engage here.
            async for _ in async_retrying:
                return await func(*args, **kwargs)
            raise AssertionError("this is unreachable")

        return wrapped_f

    return wrap
# https://stackoverflow.com/questions/76469415/getting-embeddings-of-length-1-from-langchain-openaiembeddings
def _check_response(response: dict) -> dict:
if any(len(d["embedding"]) == 1 for d in response["data"]):
import openai
raise openai.error.APIError("LocalAI API returned an empty embedding") # type: ignore[attr-defined]
return response
def embed_with_retry(embeddings: LocalAIEmbeddings, **kwargs: Any) -> Any:
    """Use tenacity to retry the embedding call."""

    @_create_retry_decorator(embeddings)
    def _call(**call_kwargs: Any) -> Any:
        return _check_response(embeddings.client.create(**call_kwargs))

    return _call(**kwargs)
async def async_embed_with_retry(embeddings: LocalAIEmbeddings, **kwargs: Any) -> Any:
    """Use tenacity to retry the embedding call."""

    @_async_retry_decorator(embeddings)
    async def _call(**call_kwargs: Any) -> Any:
        result = await embeddings.client.acreate(**call_kwargs)
        return _check_response(result)

    return await _call(**kwargs)
class LocalAIEmbeddings(BaseModel, Embeddings):
    """LocalAI embedding models.

    Since LocalAI and OpenAI have 1:1 compatibility between APIs, this class
    uses the ``openai`` Python package's ``openai.Embedding`` as its client.
    Thus, you should have the ``openai`` python package installed, and set
    the environment variable ``OPENAI_API_KEY`` to a random string (the key
    is required by the client but not checked by LocalAI).
    You also need to specify ``OPENAI_API_BASE`` to point to your LocalAI
    service endpoint.

    Example:
        .. code-block:: python

            from langchain_community.embeddings import LocalAIEmbeddings
            openai = LocalAIEmbeddings(
                openai_api_key="random-string",
                openai_api_base="http://localhost:8080"
            )
    """

    client: Any = None  #: :meta private:
    model: str = "text-embedding-ada-002"
    # NOTE: evaluated once at class-definition time, so ``deployment``
    # defaults to the literal string above; passing ``model=...`` at init
    # does NOT update it.
    deployment: str = model
    openai_api_version: Optional[str] = None
    openai_api_base: Optional[str] = None
    # to support explicit proxy for LocalAI
    openai_proxy: Optional[str] = None
    embedding_ctx_length: int = 8191
    """The maximum number of tokens to embed at once."""
    openai_api_key: Optional[str] = None
    openai_organization: Optional[str] = None
    # NOTE(review): allowed_special/disallowed_special are not referenced by
    # any method visible in this class — presumably reserved for tokenizer
    # integration; confirm before relying on them.
    allowed_special: Union[Literal["all"], Set[str]] = set()
    disallowed_special: Union[Literal["all"], Set[str], Sequence[str]] = "all"
    chunk_size: int = 1000
    """Maximum number of texts to embed in each batch"""
    max_retries: int = 6
    """Maximum number of retries to make when generating."""
    request_timeout: Optional[Union[float, Tuple[float, float]]] = None
    """Timeout in seconds for the LocalAI request."""
    headers: Any = None
    show_progress_bar: bool = False
    """Whether to show a progress bar when embedding."""
    model_kwargs: Dict[str, Any] = Field(default_factory=dict)
    """Holds any model parameters valid for `create` call not explicitly specified."""

    model_config = ConfigDict(extra="forbid", protected_namespaces=())

    @model_validator(mode="before")
    @classmethod
    def build_extra(cls, values: Dict[str, Any]) -> Any:
        """Build extra kwargs from additional params that were passed in.

        Unknown keyword arguments are moved into ``model_kwargs`` (with a
        warning); declared fields supplied inside ``model_kwargs`` are
        rejected to avoid ambiguity.
        """
        all_required_field_names = get_pydantic_field_names(cls)
        extra = values.get("model_kwargs", {})
        for field_name in list(values):
            if field_name in extra:
                raise ValueError(f"Found {field_name} supplied twice.")
            if field_name not in all_required_field_names:
                warnings.warn(
                    f"""WARNING! {field_name} is not default parameter.
                    {field_name} was transferred to model_kwargs.
                    Please confirm that {field_name} is what you intended."""
                )
                extra[field_name] = values.pop(field_name)
        invalid_model_kwargs = all_required_field_names.intersection(extra.keys())
        if invalid_model_kwargs:
            raise ValueError(
                f"Parameters {invalid_model_kwargs} should be specified explicitly. "
                f"Instead they were passed in as part of `model_kwargs` parameter."
            )
        values["model_kwargs"] = extra
        return values

    @pre_init
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that api key and python package exists in environment."""
        values["openai_api_key"] = get_from_dict_or_env(
            values, "openai_api_key", "OPENAI_API_KEY"
        )
        values["openai_api_base"] = get_from_dict_or_env(
            values,
            "openai_api_base",
            "OPENAI_API_BASE",
            default="",
        )
        values["openai_proxy"] = get_from_dict_or_env(
            values,
            "openai_proxy",
            "OPENAI_PROXY",
            default="",
        )
        default_api_version = ""
        values["openai_api_version"] = get_from_dict_or_env(
            values,
            "openai_api_version",
            "OPENAI_API_VERSION",
            default=default_api_version,
        )
        values["openai_organization"] = get_from_dict_or_env(
            values,
            "openai_organization",
            "OPENAI_ORGANIZATION",
            default="",
        )
        try:
            import openai

            # Uses the legacy (pre-1.0) openai module-level Embedding API.
            values["client"] = openai.Embedding  # type: ignore[attr-defined]
        except ImportError:
            raise ImportError(
                "Could not import openai python package. "
                "Please install it with `pip install openai`."
            )
        return values

    @property
    def _invocation_params(self) -> Dict:
        # Per-request parameters forwarded to openai.Embedding.create/acreate,
        # with any user-supplied model_kwargs layered on top.
        openai_args = {
            "model": self.model,
            "request_timeout": self.request_timeout,
            "headers": self.headers,
            "api_key": self.openai_api_key,
            "organization": self.openai_organization,
            "api_base": self.openai_api_base,
            "api_version": self.openai_api_version,
            **self.model_kwargs,
        }
        if self.openai_proxy:
            import openai

            # Side effect: configures the proxy globally on the openai module.
            openai.proxy = {  # type: ignore[attr-defined]
                "http": self.openai_proxy,
                "https": self.openai_proxy,
            }  # type: ignore[assignment]
        return openai_args

    def _embedding_func(self, text: str, *, engine: str) -> List[float]:
        """Call out to LocalAI's embedding endpoint."""
        # NOTE(review): ``engine`` is accepted for call-signature parity with
        # similar embedding classes but is not used in this body.
        # handle large input text
        if self.model.endswith("001"):
            # See: https://github.com/openai/openai-python/issues/418#issuecomment-1525939500
            # replace newlines, which can negatively affect performance.
            text = text.replace("\n", " ")
        return embed_with_retry(
            self,
            input=[text],
            **self._invocation_params,
        )["data"][0]["embedding"]

    async def _aembedding_func(self, text: str, *, engine: str) -> List[float]:
        """Call out to LocalAI's embedding endpoint."""
        # handle large input text
        if self.model.endswith("001"):
            # See: https://github.com/openai/openai-python/issues/418#issuecomment-1525939500
            # replace newlines, which can negatively affect performance.
            text = text.replace("\n", " ")
        return (
            await async_embed_with_retry(
                self,
                input=[text],
                **self._invocation_params,
            )
        )["data"][0]["embedding"]

    def embed_documents(
        self, texts: List[str], chunk_size: Optional[int] = 0
    ) -> List[List[float]]:
        """Call out to LocalAI's embedding endpoint for embedding search docs.

        Args:
            texts: The list of texts to embed.
            chunk_size: The chunk size of embeddings. If None, will use the chunk size
                specified by the class.

        Returns:
            List of embeddings, one for each text.
        """
        # NOTE(review): ``chunk_size`` is accepted but unused — texts are sent
        # one request per text rather than batched.
        # call _embedding_func for each text
        return [self._embedding_func(text, engine=self.deployment) for text in texts]

    async def aembed_documents(
        self, texts: List[str], chunk_size: Optional[int] = 0
    ) -> List[List[float]]:
        """Call out to LocalAI's embedding endpoint async for embedding search docs.

        Args:
            texts: The list of texts to embed.
            chunk_size: The chunk size of embeddings. If None, will use the chunk size
                specified by the class.

        Returns:
            List of embeddings, one for each text.
        """
        # Sequential awaits: one request per text, in order.
        embeddings = []
        for text in texts:
            response = await self._aembedding_func(text, engine=self.deployment)
            embeddings.append(response)
        return embeddings

    def embed_query(self, text: str) -> List[float]:
        """Call out to LocalAI's embedding endpoint for embedding query text.

        Args:
            text: The text to embed.

        Returns:
            Embedding for the text.
        """
        embedding = self._embedding_func(text, engine=self.deployment)
        return embedding

    async def aembed_query(self, text: str) -> List[float]:
        """Call out to LocalAI's embedding endpoint async for embedding query text.

        Args:
            text: The text to embed.

        Returns:
            Embedding for the text.
        """
        embedding = await self._aembedding_func(text, engine=self.deployment)
        return embedding
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/embeddings/oci_generative_ai.py | from enum import Enum
from typing import Any, Dict, Iterator, List, Mapping, Optional
from langchain_core.embeddings import Embeddings
from langchain_core.utils import pre_init
from pydantic import BaseModel, ConfigDict
CUSTOM_ENDPOINT_PREFIX = "ocid1.generativeaiendpoint"
class OCIAuthType(Enum):
    """OCI authentication types as enumerator.

    Member *names* (e.g. ``"API_KEY"``) are compared against the
    ``auth_type`` string passed to ``OCIGenAIEmbeddings``; the integer
    values only fix the ordering used in those comparisons.
    """

    API_KEY = 1
    SECURITY_TOKEN = 2
    INSTANCE_PRINCIPAL = 3
    RESOURCE_PRINCIPAL = 4
class OCIGenAIEmbeddings(BaseModel, Embeddings):
    """OCI embedding models.

    To authenticate, the OCI client uses the methods described in
    https://docs.oracle.com/en-us/iaas/Content/API/Concepts/sdk_authentication_methods.htm

    The authentication method is passed through auth_type and should be one of:
    API_KEY (default), SECURITY_TOKEN, INSTANCE_PRINCIPAL, RESOURCE_PRINCIPAL

    Make sure you have the required policies (profile/roles) to
    access the OCI Generative AI service. If a specific config profile is used,
    you must pass the name of the profile (~/.oci/config) through auth_profile.

    To use, you must provide the compartment id
    along with the endpoint url, and model id
    as named parameters to the constructor.

    Example:
        .. code-block:: python

            from langchain.embeddings import OCIGenAIEmbeddings

            embeddings = OCIGenAIEmbeddings(
                model_id="MY_EMBEDDING_MODEL",
                service_endpoint="https://inference.generativeai.us-chicago-1.oci.oraclecloud.com",
                compartment_id="MY_OCID"
            )
    """

    client: Any = None  #: :meta private:

    service_models: Any = None  #: :meta private:

    auth_type: Optional[str] = "API_KEY"
    """Authentication type, could be

    API_KEY,
    SECURITY_TOKEN,
    INSTANCE_PRINCIPAL,
    RESOURCE_PRINCIPAL

    If not specified, API_KEY will be used
    """

    auth_profile: Optional[str] = "DEFAULT"
    """The name of the profile in ~/.oci/config
    If not specified , DEFAULT will be used
    """

    model_id: Optional[str] = None
    """Id of the model to call, e.g., cohere.embed-english-light-v2.0"""

    model_kwargs: Optional[Dict] = None
    """Keyword arguments to pass to the model"""

    service_endpoint: Optional[str] = None
    """service endpoint url"""

    compartment_id: Optional[str] = None
    """OCID of compartment"""

    truncate: Optional[str] = "END"
    """Truncate embeddings that are too long from start or end ("NONE"|"START"|"END")"""

    batch_size: int = 96
    """Batch size of OCI GenAI embedding requests. OCI GenAI may handle up to 96 texts
    per request"""

    model_config = ConfigDict(extra="forbid", protected_namespaces=())

    @pre_init
    def validate_environment(cls, values: Dict) -> Dict:  # pylint: disable=no-self-argument
        """Validate that OCI config and python package exists in environment.

        Builds an ``oci`` GenerativeAiInferenceClient according to
        ``auth_type`` unless a pre-built ``client`` was supplied.
        """
        # Skip creating new client if passed in constructor
        if values["client"] is not None:
            return values

        try:
            import oci

            client_kwargs = {
                "config": {},
                "signer": None,
                "service_endpoint": values["service_endpoint"],
                "retry_strategy": oci.retry.DEFAULT_RETRY_STRATEGY,
                "timeout": (10, 240),  # default timeout config for OCI Gen AI service
            }

            # API_KEY: credentials come entirely from the ~/.oci/config
            # profile; no explicit signer is needed.
            if values["auth_type"] == OCIAuthType(1).name:
                client_kwargs["config"] = oci.config.from_file(
                    profile_name=values["auth_profile"]
                )
                client_kwargs.pop("signer", None)
            # SECURITY_TOKEN: sign requests using the session token and
            # private key referenced by the config profile.
            elif values["auth_type"] == OCIAuthType(2).name:

                def make_security_token_signer(oci_config):  # type: ignore[no-untyped-def]
                    pk = oci.signer.load_private_key_from_file(
                        oci_config.get("key_file"), None
                    )
                    with open(
                        oci_config.get("security_token_file"), encoding="utf-8"
                    ) as f:
                        st_string = f.read()
                    return oci.auth.signers.SecurityTokenSigner(st_string, pk)

                client_kwargs["config"] = oci.config.from_file(
                    profile_name=values["auth_profile"]
                )
                client_kwargs["signer"] = make_security_token_signer(
                    oci_config=client_kwargs["config"]
                )
            # INSTANCE_PRINCIPAL: authenticate as the compute instance itself.
            elif values["auth_type"] == OCIAuthType(3).name:
                client_kwargs["signer"] = (
                    oci.auth.signers.InstancePrincipalsSecurityTokenSigner()
                )
            # RESOURCE_PRINCIPAL: authenticate as the OCI resource (e.g. an
            # OCI Function).
            elif values["auth_type"] == OCIAuthType(4).name:
                client_kwargs["signer"] = (
                    oci.auth.signers.get_resource_principals_signer()
                )
            else:
                raise ValueError("Please provide valid value to auth_type")

            values["client"] = oci.generative_ai_inference.GenerativeAiInferenceClient(
                **client_kwargs
            )

        except ImportError as ex:
            raise ImportError(
                "Could not import oci python package. "
                "Please make sure you have the oci package installed."
            ) from ex
        except Exception as e:
            # NOTE(review): this message spells INSTANCE_PRINCIPLE /
            # RESOURCE_PRINCIPLE, but the accepted enum names are
            # INSTANCE_PRINCIPAL / RESOURCE_PRINCIPAL.
            raise ValueError(
                "Could not authenticate with OCI client. "
                "Please check if ~/.oci/config exists. "
                "If INSTANCE_PRINCIPLE or RESOURCE_PRINCIPLE is used, "
                "Please check the specified "
                "auth_profile and auth_type are valid."
            ) from e

        return values

    @property
    def _identifying_params(self) -> Mapping[str, Any]:
        """Get the identifying parameters."""
        _model_kwargs = self.model_kwargs or {}
        return {
            **{"model_kwargs": _model_kwargs},
        }

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Call out to OCIGenAI's embedding endpoint.

        Args:
            texts: The list of texts to embed.

        Returns:
            List of embeddings, one for each text.
        """
        from oci.generative_ai_inference import models

        if not self.model_id:
            raise ValueError("Model ID is required to embed documents")

        # Custom (fine-tuned) models are addressed by their endpoint OCID,
        # base models by model id.
        if self.model_id.startswith(CUSTOM_ENDPOINT_PREFIX):
            serving_mode = models.DedicatedServingMode(endpoint_id=self.model_id)
        else:
            serving_mode = models.OnDemandServingMode(model_id=self.model_id)

        embeddings = []

        def split_texts() -> Iterator[List[str]]:
            # The service accepts at most `batch_size` (<=96) texts per call.
            for i in range(0, len(texts), self.batch_size):
                yield texts[i : i + self.batch_size]

        for chunk in split_texts():
            invocation_obj = models.EmbedTextDetails(
                serving_mode=serving_mode,
                compartment_id=self.compartment_id,
                truncate=self.truncate,
                inputs=chunk,
            )
            response = self.client.embed_text(invocation_obj)
            embeddings.extend(response.data.embeddings)

        return embeddings

    def embed_query(self, text: str) -> List[float]:
        """Call out to OCIGenAI's embedding endpoint.

        Args:
            text: The text to embed.

        Returns:
            Embeddings for the text.
        """
        return self.embed_documents([text])[0]
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/embeddings/embaas.py | from typing import Any, Dict, List, Mapping, Optional
import requests
from langchain_core.embeddings import Embeddings
from langchain_core.utils import convert_to_secret_str, get_from_dict_or_env, pre_init
from pydantic import BaseModel, ConfigDict, SecretStr
from requests.adapters import HTTPAdapter, Retry
from typing_extensions import NotRequired, TypedDict
# Currently supported maximum batch size for embedding requests
MAX_BATCH_SIZE = 256
EMBAAS_API_URL = "https://api.embaas.io/v1/embeddings/"
class EmbaasEmbeddingsPayload(TypedDict):
    """Payload for the Embaas embeddings API."""

    # Name of the embedding model, e.g. "e5-large-v2".
    model: str
    # Batch of input texts to embed (at most MAX_BATCH_SIZE per request).
    texts: List[str]
    # Optional instruction for instructor-style models; omitted when unset.
    instruction: NotRequired[str]
class EmbaasEmbeddings(BaseModel, Embeddings):
    """Embaas's embedding service.

    To use, you should have the
    environment variable ``EMBAAS_API_KEY`` set with your API key, or pass
    it as a named parameter to the constructor.

    Example:
        .. code-block:: python

            # initialize with default model and instruction
            from langchain_community.embeddings import EmbaasEmbeddings
            emb = EmbaasEmbeddings()

            # initialize with custom model and instruction
            from langchain_community.embeddings import EmbaasEmbeddings
            emb_model = "instructor-large"
            emb_inst = "Represent the Wikipedia document for retrieval"
            emb = EmbaasEmbeddings(
                model=emb_model,
                instruction=emb_inst
            )
    """

    model: str = "e5-large-v2"
    """The model used for embeddings."""
    instruction: Optional[str] = None
    """Instruction used for domain-specific embeddings."""
    api_url: str = EMBAAS_API_URL
    """The URL for the embaas embeddings API."""
    embaas_api_key: Optional[SecretStr] = None
    """API key; read from env var ``EMBAAS_API_KEY`` if not provided."""
    max_retries: Optional[int] = 3
    """Max number of retries for requests."""
    timeout: Optional[int] = 30
    """Request timeout in seconds."""

    model_config = ConfigDict(
        extra="forbid",
    )

    @pre_init
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that api key and python package exists in environment."""
        embaas_api_key = convert_to_secret_str(
            get_from_dict_or_env(values, "embaas_api_key", "EMBAAS_API_KEY")
        )
        values["embaas_api_key"] = embaas_api_key
        return values

    @property
    def _identifying_params(self) -> Mapping[str, Any]:
        """Get the identifying params."""
        return {"model": self.model, "instruction": self.instruction}

    def _generate_payload(self, texts: List[str]) -> EmbaasEmbeddingsPayload:
        """Generates payload for the API request."""
        payload = EmbaasEmbeddingsPayload(texts=texts, model=self.model)
        if self.instruction:
            payload["instruction"] = self.instruction
        return payload

    def _handle_request(self, payload: EmbaasEmbeddingsPayload) -> List[List[float]]:
        """Sends a request to the Embaas API and handles the response.

        Raises:
            requests.exceptions.HTTPError: on non-2xx responses, so that
                ``_generate_embeddings`` can surface the API's error message.
        """
        headers = {
            "Authorization": f"Bearer {self.embaas_api_key.get_secret_value()}",  # type: ignore[union-attr]
            "Content-Type": "application/json",
        }

        # Mount a retrying adapter so transient connection failures on POST
        # are retried with exponential backoff.
        session = requests.Session()
        retries = Retry(
            total=self.max_retries,
            backoff_factor=0.5,
            allowed_methods=["POST"],
            raise_on_status=True,
        )
        session.mount("http://", HTTPAdapter(max_retries=retries))
        session.mount("https://", HTTPAdapter(max_retries=retries))

        response = session.post(
            self.api_url,
            headers=headers,
            json=payload,
            timeout=self.timeout,
        )
        # Raise HTTPError (a RequestException) on error statuses; without
        # this, a 4xx/5xx body lacking "data" surfaced as an opaque KeyError
        # below and the error-message handling in _generate_embeddings was
        # unreachable.
        response.raise_for_status()

        parsed_response = response.json()
        embeddings = [item["embedding"] for item in parsed_response["data"]]

        return embeddings

    def _generate_embeddings(self, texts: List[str]) -> List[List[float]]:
        """Generate embeddings using the Embaas API.

        Wraps request failures into ``ValueError`` carrying the API's
        validation message when one is available.
        """
        payload = self._generate_payload(texts)
        try:
            return self._handle_request(payload)
        except requests.exceptions.RequestException as e:
            if e.response is None or not e.response.text:
                raise ValueError(f"Error raised by embaas embeddings API: {e}")

            parsed_response = e.response.json()
            if "message" in parsed_response:
                raise ValueError(
                    "Validation Error raised by embaas embeddings API:"
                    f"{parsed_response['message']}"
                )
            raise

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Get embeddings for a list of texts.

        Args:
            texts: The list of texts to get embeddings for.

        Returns:
            List of embeddings, one for each text.
        """
        # Split into API-sized batches, then flatten the per-batch results.
        batches = [
            texts[i : i + MAX_BATCH_SIZE] for i in range(0, len(texts), MAX_BATCH_SIZE)
        ]
        embeddings = [self._generate_embeddings(batch) for batch in batches]
        # flatten the list of lists into a single list
        return [embedding for batch in embeddings for embedding in batch]

    def embed_query(self, text: str) -> List[float]:
        """Get embeddings for a single text.

        Args:
            text: The text to get embeddings for.

        Returns:
            List of embeddings.
        """
        return self.embed_documents([text])[0]
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/embeddings/javelin_ai_gateway.py | from __future__ import annotations
from typing import Any, Iterator, List, Optional
from langchain_core.embeddings import Embeddings
from pydantic import BaseModel
def _chunk(texts: List[str], size: int) -> Iterator[List[str]]:
for i in range(0, len(texts), size):
yield texts[i : i + size]
class JavelinAIGatewayEmbeddings(Embeddings, BaseModel):
    """Javelin AI Gateway embeddings.

    To use, you should have the ``javelin_sdk`` python package installed.
    For more information, see https://docs.getjavelin.io

    Example:
        .. code-block:: python

            from langchain_community.embeddings import JavelinAIGatewayEmbeddings

            embeddings = JavelinAIGatewayEmbeddings(
                gateway_uri="<javelin-ai-gateway-uri>",
                route="<your-javelin-gateway-embeddings-route>"
            )
    """

    client: Any
    """javelin client."""

    route: str
    """The route to use for the Javelin AI Gateway API."""

    gateway_uri: Optional[str] = None
    """The URI for the Javelin AI Gateway API."""

    javelin_api_key: Optional[str] = None
    """The API key for the Javelin AI Gateway API."""

    def __init__(self, **kwargs: Any):
        """Initialize the model, building an SDK client when possible.

        Raises:
            ImportError: if ``javelin_sdk`` is not installed.
            ValueError: if the gateway rejects the provided API key.
        """
        # Import inside __init__ so this module stays importable without
        # the optional SDK dependency.
        try:
            from javelin_sdk import (
                JavelinClient,
                UnauthorizedError,
            )
        except ImportError:
            raise ImportError(
                "Could not import javelin_sdk python package. "
                "Please install it with `pip install javelin_sdk`."
            )

        super().__init__(**kwargs)
        # Only build a client when a gateway URI is given; otherwise the
        # caller is expected to pass a ready-made `client`.
        if self.gateway_uri:
            try:
                self.client = JavelinClient(
                    base_url=self.gateway_uri, api_key=self.javelin_api_key
                )
            except UnauthorizedError as e:
                raise ValueError("Javelin: Incorrect API Key.") from e

    def _query(self, texts: List[str]) -> List[List[float]]:
        """Embed ``texts`` synchronously, 20 at a time.

        Best-effort: a chunk that fails with ``ValueError`` is logged to
        stdout and skipped, so the result may hold fewer vectors than inputs.
        """
        embeddings = []
        for txt in _chunk(texts, 20):
            try:
                resp = self.client.query_route(self.route, query_body={"input": txt})
                resp_dict = resp.dict()

                # Embeddings live under llm_response.data[*].embedding.
                embeddings_chunk = resp_dict.get("llm_response", {}).get("data", [])
                for item in embeddings_chunk:
                    if "embedding" in item:
                        embeddings.append(item["embedding"])
            except ValueError as e:
                print("Failed to query route: " + str(e))  # noqa: T201

        return embeddings

    async def _aquery(self, texts: List[str]) -> List[List[float]]:
        """Async counterpart of ``_query``: same chunking, same best-effort
        error handling."""
        embeddings = []
        for txt in _chunk(texts, 20):
            try:
                resp = await self.client.aquery_route(
                    self.route, query_body={"input": txt}
                )
                resp_dict = resp.dict()

                embeddings_chunk = resp_dict.get("llm_response", {}).get("data", [])
                for item in embeddings_chunk:
                    if "embedding" in item:
                        embeddings.append(item["embedding"])
            except ValueError as e:
                print("Failed to query route: " + str(e))  # noqa: T201

        return embeddings

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Embed a list of documents via the configured gateway route."""
        return self._query(texts)

    def embed_query(self, text: str) -> List[float]:
        """Embed a single query string."""
        return self._query([text])[0]

    async def aembed_documents(self, texts: List[str]) -> List[List[float]]:
        """Async: embed a list of documents."""
        return await self._aquery(texts)

    async def aembed_query(self, text: str) -> List[float]:
        """Async: embed a single query string."""
        result = await self._aquery([text])
        return result[0]
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/embeddings/fake.py | import hashlib
from typing import List
import numpy as np
from langchain_core.embeddings import Embeddings
from pydantic import BaseModel
class FakeEmbeddings(Embeddings, BaseModel):
    """Fake embedding model returning a fresh random vector per call."""

    size: int
    """The size of the embedding vector."""

    def _get_embedding(self) -> List[float]:
        # A new standard-normal draw on every invocation.
        vector = np.random.normal(size=self.size)
        return list(vector)

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        embeddings: List[List[float]] = []
        for _ in texts:
            embeddings.append(self._get_embedding())
        return embeddings

    def embed_query(self, text: str) -> List[float]:
        return self._get_embedding()
class DeterministicFakeEmbedding(Embeddings, BaseModel):
    """
    Fake embedding model that always returns
    the same embedding vector for the same text.
    """

    size: int
    """The size of the embedding vector."""

    def _get_embedding(self, seed: int) -> List[float]:
        """Return a deterministic pseudo-random vector derived from ``seed``.

        Uses a local ``RandomState`` instead of seeding the *global* numpy
        RNG, so calling this no longer perturbs unrelated code's randomness.
        The values are identical to the previous
        ``np.random.seed(seed); np.random.normal(...)`` (both drive the same
        legacy Mersenne Twister stream).
        """
        rng = np.random.RandomState(seed)
        return list(rng.normal(size=self.size))

    @staticmethod
    def _get_seed(text: str) -> int:
        """
        Get a seed for the random generator, using the hash of the text.
        """
        # sha256 (not builtin hash()) so the seed is stable across processes;
        # the modulo keeps it within numpy's 32-bit seed range.
        return int(hashlib.sha256(text.encode("utf-8")).hexdigest(), 16) % 10**8

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        return [self._get_embedding(seed=self._get_seed(text)) for text in texts]

    def embed_query(self, text: str) -> List[float]:
        return self._get_embedding(seed=self._get_seed(text))
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/embeddings/openai.py | from __future__ import annotations
import logging
import os
import warnings
from typing import (
Any,
Callable,
Dict,
List,
Literal,
Mapping,
Optional,
Sequence,
Set,
Tuple,
Union,
cast,
)
import numpy as np
from langchain_core._api.deprecation import deprecated
from langchain_core.embeddings import Embeddings
from langchain_core.utils import (
get_from_dict_or_env,
get_pydantic_field_names,
pre_init,
)
from pydantic import BaseModel, ConfigDict, Field, model_validator
from tenacity import (
AsyncRetrying,
before_sleep_log,
retry,
retry_if_exception_type,
stop_after_attempt,
wait_exponential,
)
from langchain_community.utils.openai import is_openai_v1
logger = logging.getLogger(__name__)
def _create_retry_decorator(embeddings: OpenAIEmbeddings) -> Callable[[Any], Any]:
    """Build a tenacity ``retry`` decorator from the embeddings' settings.

    Retries only the transient openai<1 error types (timeout, API error,
    connection error, rate limit, service unavailable), with exponential
    backoff bounded by ``retry_min_seconds``/``retry_max_seconds`` and at
    most ``max_retries`` attempts; the last exception is re-raised.
    """
    import openai

    # Wait 2^x * 1 second between each retry starting with
    # retry_min_seconds seconds, then up to retry_max_seconds seconds,
    # then retry_max_seconds seconds afterwards
    # retry_min_seconds and retry_max_seconds are optional arguments of
    # OpenAIEmbeddings
    return retry(
        reraise=True,
        stop=stop_after_attempt(embeddings.max_retries),
        wait=wait_exponential(
            multiplier=1,
            min=embeddings.retry_min_seconds,
            max=embeddings.retry_max_seconds,
        ),
        retry=(
            retry_if_exception_type(openai.error.Timeout)  # type: ignore[attr-defined]
            | retry_if_exception_type(openai.error.APIError)  # type: ignore[attr-defined]
            | retry_if_exception_type(openai.error.APIConnectionError)  # type: ignore[attr-defined]
            | retry_if_exception_type(openai.error.RateLimitError)  # type: ignore[attr-defined]
            | retry_if_exception_type(openai.error.ServiceUnavailableError)  # type: ignore[attr-defined]
        ),
        before_sleep=before_sleep_log(logger, logging.WARNING),
    )
def _async_retry_decorator(embeddings: OpenAIEmbeddings) -> Any:
    """Build a retry decorator for *async* embedding calls (openai<1).

    Mirrors ``_create_retry_decorator`` but wraps the coroutine with
    tenacity's ``AsyncRetrying`` since ``retry`` cannot decorate async
    functions directly here.
    """
    import openai

    # Wait 2^x * 1 second between each retry starting with
    # retry_min_seconds seconds, then up to retry_max_seconds seconds,
    # then retry_max_seconds seconds afterwards
    # retry_min_seconds and retry_max_seconds are optional arguments of
    # OpenAIEmbeddings
    async_retrying = AsyncRetrying(
        reraise=True,
        stop=stop_after_attempt(embeddings.max_retries),
        wait=wait_exponential(
            multiplier=1,
            min=embeddings.retry_min_seconds,
            max=embeddings.retry_max_seconds,
        ),
        retry=(
            retry_if_exception_type(openai.error.Timeout)  # type: ignore[attr-defined]
            | retry_if_exception_type(openai.error.APIError)  # type: ignore[attr-defined]
            | retry_if_exception_type(openai.error.APIConnectionError)  # type: ignore[attr-defined]
            | retry_if_exception_type(openai.error.RateLimitError)  # type: ignore[attr-defined]
            | retry_if_exception_type(openai.error.ServiceUnavailableError)  # type: ignore[attr-defined]
        ),
        before_sleep=before_sleep_log(logger, logging.WARNING),
    )

    def wrap(func: Callable) -> Callable:
        async def wrapped_f(*args: Any, **kwargs: Any) -> Callable:
            # AsyncRetrying yields one attempt per iteration; returning from
            # inside the loop ends the retry cycle on the first success.
            async for _ in async_retrying:
                return await func(*args, **kwargs)
            raise AssertionError("this is unreachable")

        return wrapped_f

    return wrap
# https://stackoverflow.com/questions/76469415/getting-embeddings-of-length-1-from-langchain-openaiembeddings
def _check_response(response: dict, skip_empty: bool = False) -> dict:
if any(len(d["embedding"]) == 1 for d in response["data"]) and not skip_empty:
import openai
raise openai.error.APIError("OpenAI API returned an empty embedding") # type: ignore[attr-defined]
return response
def embed_with_retry(embeddings: OpenAIEmbeddings, **kwargs: Any) -> Any:
    """Use tenacity to retry the embedding call."""
    if is_openai_v1():
        # openai>=1 clients handle retries internally; call straight through.
        return embeddings.client.create(**kwargs)

    @_create_retry_decorator(embeddings)
    def _call(**call_kwargs: Any) -> Any:
        raw = embeddings.client.create(**call_kwargs)
        return _check_response(raw, skip_empty=embeddings.skip_empty)

    return _call(**kwargs)
async def async_embed_with_retry(embeddings: OpenAIEmbeddings, **kwargs: Any) -> Any:
    """Use tenacity to retry the embedding call."""
    if is_openai_v1():
        # openai>=1 async clients retry internally; call directly.
        return await embeddings.async_client.create(**kwargs)

    @_async_retry_decorator(embeddings)
    async def _async_embed_with_retry(**kwargs: Any) -> Any:
        # Legacy openai<1: `client` is the module-level ``openai.Embedding``
        # resource, whose async entry point is ``acreate``.
        response = await embeddings.client.acreate(**kwargs)
        return _check_response(response, skip_empty=embeddings.skip_empty)

    return await _async_embed_with_retry(**kwargs)
@deprecated(
since="0.0.9",
removal="1.0",
alternative_import="langchain_openai.OpenAIEmbeddings",
)
class OpenAIEmbeddings(BaseModel, Embeddings):
"""OpenAI embedding models.
To use, you should have the ``openai`` python package installed, and the
environment variable ``OPENAI_API_KEY`` set with your API key or pass it
as a named parameter to the constructor.
Example:
.. code-block:: python
from langchain_community.embeddings import OpenAIEmbeddings
openai = OpenAIEmbeddings(openai_api_key="my-api-key")
In order to use the library with Microsoft Azure endpoints, you need to set
the OPENAI_API_TYPE, OPENAI_API_BASE, OPENAI_API_KEY and OPENAI_API_VERSION.
The OPENAI_API_TYPE must be set to 'azure' and the others correspond to
the properties of your endpoint.
In addition, the deployment name must be passed as the model parameter.
Example:
.. code-block:: python
import os
os.environ["OPENAI_API_TYPE"] = "azure"
os.environ["OPENAI_API_BASE"] = "https://<your-endpoint.openai.azure.com/"
os.environ["OPENAI_API_KEY"] = "your AzureOpenAI key"
os.environ["OPENAI_API_VERSION"] = "2023-05-15"
os.environ["OPENAI_PROXY"] = "http://your-corporate-proxy:8080"
from langchain_community.embeddings.openai import OpenAIEmbeddings
embeddings = OpenAIEmbeddings(
deployment="your-embeddings-deployment-name",
model="your-embeddings-model-name",
openai_api_base="https://your-endpoint.openai.azure.com/",
openai_api_type="azure",
)
text = "This is a test query."
query_result = embeddings.embed_query(text)
"""
client: Any = Field(default=None, exclude=True) #: :meta private:
async_client: Any = Field(default=None, exclude=True) #: :meta private:
model: str = "text-embedding-ada-002"
# to support Azure OpenAI Service custom deployment names
deployment: Optional[str] = model
# TODO: Move to AzureOpenAIEmbeddings.
openai_api_version: Optional[str] = Field(default=None, alias="api_version")
"""Automatically inferred from env var `OPENAI_API_VERSION` if not provided."""
# to support Azure OpenAI Service custom endpoints
openai_api_base: Optional[str] = Field(default=None, alias="base_url")
"""Base URL path for API requests, leave blank if not using a proxy or service
emulator."""
# to support Azure OpenAI Service custom endpoints
openai_api_type: Optional[str] = None
# to support explicit proxy for OpenAI
openai_proxy: Optional[str] = None
embedding_ctx_length: int = 8191
"""The maximum number of tokens to embed at once."""
openai_api_key: Optional[str] = Field(default=None, alias="api_key")
"""Automatically inferred from env var `OPENAI_API_KEY` if not provided."""
openai_organization: Optional[str] = Field(default=None, alias="organization")
"""Automatically inferred from env var `OPENAI_ORG_ID` if not provided."""
allowed_special: Union[Literal["all"], Set[str]] = set()
disallowed_special: Union[Literal["all"], Set[str], Sequence[str]] = "all"
chunk_size: int = 1000
"""Maximum number of texts to embed in each batch"""
max_retries: int = 2
"""Maximum number of retries to make when generating."""
request_timeout: Optional[Union[float, Tuple[float, float], Any]] = Field(
default=None, alias="timeout"
)
"""Timeout for requests to OpenAI completion API. Can be float, httpx.Timeout or
None."""
headers: Any = None
tiktoken_enabled: bool = True
"""Set this to False for non-OpenAI implementations of the embeddings API, e.g.
the `--extensions openai` extension for `text-generation-webui`"""
tiktoken_model_name: Optional[str] = None
"""The model name to pass to tiktoken when using this class.
Tiktoken is used to count the number of tokens in documents to constrain
them to be under a certain limit. By default, when set to None, this will
be the same as the embedding model name. However, there are some cases
where you may want to use this Embedding class with a model name not
supported by tiktoken. This can include when using Azure embeddings or
when using one of the many model providers that expose an OpenAI-like
API but with different models. In those cases, in order to avoid erroring
when tiktoken is called, you can specify a model name to use here."""
show_progress_bar: bool = False
"""Whether to show a progress bar when embedding."""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Holds any model parameters valid for `create` call not explicitly specified."""
skip_empty: bool = False
"""Whether to skip empty strings when embedding or raise an error.
Defaults to not skipping."""
default_headers: Union[Mapping[str, str], None] = None
default_query: Union[Mapping[str, object], None] = None
# Configure a custom httpx client. See the
# [httpx documentation](https://www.python-httpx.org/api/#client) for more details.
retry_min_seconds: int = 4
"""Min number of seconds to wait between retries"""
retry_max_seconds: int = 20
"""Max number of seconds to wait between retries"""
http_client: Union[Any, None] = None
"""Optional httpx.Client."""
model_config = ConfigDict(
populate_by_name=True, extra="forbid", protected_namespaces=()
)
@model_validator(mode="before")
@classmethod
def build_extra(cls, values: Dict[str, Any]) -> Any:
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = get_pydantic_field_names(cls)
extra = values.get("model_kwargs", {})
for field_name in list(values):
if field_name in extra:
raise ValueError(f"Found {field_name} supplied twice.")
if field_name not in all_required_field_names:
warnings.warn(
f"""WARNING! {field_name} is not default parameter.
{field_name} was transferred to model_kwargs.
Please confirm that {field_name} is what you intended."""
)
extra[field_name] = values.pop(field_name)
invalid_model_kwargs = all_required_field_names.intersection(extra.keys())
if invalid_model_kwargs:
raise ValueError(
f"Parameters {invalid_model_kwargs} should be specified explicitly. "
f"Instead they were passed in as part of `model_kwargs` parameter."
)
values["model_kwargs"] = extra
return values
@pre_init
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
values["openai_api_key"] = get_from_dict_or_env(
values, "openai_api_key", "OPENAI_API_KEY"
)
values["openai_api_base"] = values["openai_api_base"] or os.getenv(
"OPENAI_API_BASE"
)
values["openai_api_type"] = get_from_dict_or_env(
values,
"openai_api_type",
"OPENAI_API_TYPE",
default="",
)
values["openai_proxy"] = get_from_dict_or_env(
values,
"openai_proxy",
"OPENAI_PROXY",
default="",
)
if values["openai_api_type"] in ("azure", "azure_ad", "azuread"):
default_api_version = "2023-05-15"
# Azure OpenAI embedding models allow a maximum of 2048
# texts at a time in each batch
# See: https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#embeddings
values["chunk_size"] = min(values["chunk_size"], 2048)
else:
default_api_version = ""
values["openai_api_version"] = get_from_dict_or_env(
values,
"openai_api_version",
"OPENAI_API_VERSION",
default=default_api_version,
)
# Check OPENAI_ORGANIZATION for backwards compatibility.
values["openai_organization"] = (
values["openai_organization"]
or os.getenv("OPENAI_ORG_ID")
or os.getenv("OPENAI_ORGANIZATION")
)
try:
import openai
except ImportError:
raise ImportError(
"Could not import openai python package. "
"Please install it with `pip install openai`."
)
else:
if is_openai_v1():
if values["openai_api_type"] in ("azure", "azure_ad", "azuread"):
warnings.warn(
"If you have openai>=1.0.0 installed and are using Azure, "
"please use the `AzureOpenAIEmbeddings` class."
)
client_params = {
"api_key": values["openai_api_key"],
"organization": values["openai_organization"],
"base_url": values["openai_api_base"],
"timeout": values["request_timeout"],
"max_retries": values["max_retries"],
"default_headers": values["default_headers"],
"default_query": values["default_query"],
"http_client": values["http_client"],
}
if not values.get("client"):
values["client"] = openai.OpenAI(**client_params).embeddings
if not values.get("async_client"):
values["async_client"] = openai.AsyncOpenAI(
**client_params
).embeddings
elif not values.get("client"):
values["client"] = openai.Embedding # type: ignore[attr-defined]
else:
pass
return values
@property
def _invocation_params(self) -> Dict[str, Any]:
if is_openai_v1():
openai_args: Dict = {"model": self.model, **self.model_kwargs}
else:
openai_args = {
"model": self.model,
"request_timeout": self.request_timeout,
"headers": self.headers,
"api_key": self.openai_api_key,
"organization": self.openai_organization,
"api_base": self.openai_api_base,
"api_type": self.openai_api_type,
"api_version": self.openai_api_version,
**self.model_kwargs,
}
if self.openai_api_type in ("azure", "azure_ad", "azuread"):
openai_args["engine"] = self.deployment
# TODO: Look into proxy with openai v1.
if self.openai_proxy:
try:
import openai
except ImportError:
raise ImportError(
"Could not import openai python package. "
"Please install it with `pip install openai`."
)
openai.proxy = { # type: ignore[attr-defined]
"http": self.openai_proxy,
"https": self.openai_proxy,
} # type: ignore[assignment]
return openai_args
# please refer to
# https://github.com/openai/openai-cookbook/blob/main/examples/Embedding_long_inputs.ipynb
def _get_len_safe_embeddings(
self, texts: List[str], *, engine: str, chunk_size: Optional[int] = None
) -> List[List[float]]:
"""
Generate length-safe embeddings for a list of texts.
This method handles tokenization and embedding generation, respecting the
set embedding context length and chunk size. It supports both tiktoken
and HuggingFace tokenizer based on the tiktoken_enabled flag.
Args:
texts (List[str]): A list of texts to embed.
engine (str): The engine or model to use for embeddings.
chunk_size (Optional[int]): The size of chunks for processing embeddings.
Returns:
List[List[float]]: A list of embeddings for each input text.
"""
tokens = []
indices = []
model_name = self.tiktoken_model_name or self.model
_chunk_size = chunk_size or self.chunk_size
# If tiktoken flag set to False
if not self.tiktoken_enabled:
try:
from transformers import AutoTokenizer
except ImportError:
raise ImportError(
"Could not import transformers python package. "
"This is needed in order to for OpenAIEmbeddings without "
"`tiktoken`. Please install it with `pip install transformers`. "
)
tokenizer = AutoTokenizer.from_pretrained(
pretrained_model_name_or_path=model_name
)
for i, text in enumerate(texts):
# Tokenize the text using HuggingFace transformers
tokenized = tokenizer.encode(text, add_special_tokens=False)
# Split tokens into chunks respecting the embedding_ctx_length
for j in range(0, len(tokenized), self.embedding_ctx_length):
token_chunk = tokenized[j : j + self.embedding_ctx_length]
# Convert token IDs back to a string
chunk_text = tokenizer.decode(token_chunk)
tokens.append(chunk_text)
indices.append(i)
else:
try:
import tiktoken
except ImportError:
raise ImportError(
"Could not import tiktoken python package. "
"This is needed in order to for OpenAIEmbeddings. "
"Please install it with `pip install tiktoken`."
)
try:
encoding = tiktoken.encoding_for_model(model_name)
except KeyError:
logger.warning("Warning: model not found. Using cl100k_base encoding.")
model = "cl100k_base"
encoding = tiktoken.get_encoding(model)
for i, text in enumerate(texts):
if self.model.endswith("001"):
# See: https://github.com/openai/openai-python/
# issues/418#issuecomment-1525939500
# replace newlines, which can negatively affect performance.
text = text.replace("\n", " ")
token = encoding.encode(
text=text,
allowed_special=self.allowed_special,
disallowed_special=self.disallowed_special,
)
# Split tokens into chunks respecting the embedding_ctx_length
for j in range(0, len(token), self.embedding_ctx_length):
tokens.append(token[j : j + self.embedding_ctx_length])
indices.append(i)
if self.show_progress_bar:
try:
from tqdm.auto import tqdm
_iter = tqdm(range(0, len(tokens), _chunk_size))
except ImportError:
_iter = range(0, len(tokens), _chunk_size)
else:
_iter = range(0, len(tokens), _chunk_size)
batched_embeddings: List[List[float]] = []
for i in _iter:
response = embed_with_retry(
self,
input=tokens[i : i + _chunk_size],
**self._invocation_params,
)
if not isinstance(response, dict):
response = response.dict()
batched_embeddings.extend(r["embedding"] for r in response["data"])
results: List[List[List[float]]] = [[] for _ in range(len(texts))]
num_tokens_in_batch: List[List[int]] = [[] for _ in range(len(texts))]
for i in range(len(indices)):
if self.skip_empty and len(batched_embeddings[i]) == 1:
continue
results[indices[i]].append(batched_embeddings[i])
num_tokens_in_batch[indices[i]].append(len(tokens[i]))
embeddings: List[List[float]] = [[] for _ in range(len(texts))]
for i in range(len(texts)):
_result = results[i]
if len(_result) == 0:
average_embedded = embed_with_retry(
self,
input="",
**self._invocation_params,
)
if not isinstance(average_embedded, dict):
average_embedded = average_embedded.dict()
average = average_embedded["data"][0]["embedding"]
else:
average = np.average(_result, axis=0, weights=num_tokens_in_batch[i])
embeddings[i] = (average / np.linalg.norm(average)).tolist()
return embeddings
# please refer to
# https://github.com/openai/openai-cookbook/blob/main/examples/Embedding_long_inputs.ipynb
async def _aget_len_safe_embeddings(
self, texts: List[str], *, engine: str, chunk_size: Optional[int] = None
) -> List[List[float]]:
"""
Asynchronously generate length-safe embeddings for a list of texts.
This method handles tokenization and asynchronous embedding generation,
respecting the set embedding context length and chunk size. It supports both
`tiktoken` and HuggingFace `tokenizer` based on the tiktoken_enabled flag.
Args:
texts (List[str]): A list of texts to embed.
engine (str): The engine or model to use for embeddings.
chunk_size (Optional[int]): The size of chunks for processing embeddings.
Returns:
List[List[float]]: A list of embeddings for each input text.
"""
tokens = []
indices = []
model_name = self.tiktoken_model_name or self.model
_chunk_size = chunk_size or self.chunk_size
# If tiktoken flag set to False
if not self.tiktoken_enabled:
try:
from transformers import AutoTokenizer
except ImportError:
raise ImportError(
"Could not import transformers python package. "
"This is needed in order to for OpenAIEmbeddings without "
" `tiktoken`. Please install it with `pip install transformers`."
)
tokenizer = AutoTokenizer.from_pretrained(
pretrained_model_name_or_path=model_name
)
for i, text in enumerate(texts):
# Tokenize the text using HuggingFace transformers
tokenized = tokenizer.encode(text, add_special_tokens=False)
# Split tokens into chunks respecting the embedding_ctx_length
for j in range(0, len(tokenized), self.embedding_ctx_length):
token_chunk = tokenized[j : j + self.embedding_ctx_length]
# Convert token IDs back to a string
chunk_text = tokenizer.decode(token_chunk)
tokens.append(chunk_text)
indices.append(i)
else:
try:
import tiktoken
except ImportError:
raise ImportError(
"Could not import tiktoken python package. "
"This is needed in order to for OpenAIEmbeddings. "
"Please install it with `pip install tiktoken`."
)
try:
encoding = tiktoken.encoding_for_model(model_name)
except KeyError:
logger.warning("Warning: model not found. Using cl100k_base encoding.")
model = "cl100k_base"
encoding = tiktoken.get_encoding(model)
for i, text in enumerate(texts):
if self.model.endswith("001"):
# See: https://github.com/openai/openai-python/
# issues/418#issuecomment-1525939500
# replace newlines, which can negatively affect performance.
text = text.replace("\n", " ")
token = encoding.encode(
text=text,
allowed_special=self.allowed_special,
disallowed_special=self.disallowed_special,
)
# Split tokens into chunks respecting the embedding_ctx_length
for j in range(0, len(token), self.embedding_ctx_length):
tokens.append(token[j : j + self.embedding_ctx_length])
indices.append(i)
batched_embeddings: List[List[float]] = []
_chunk_size = chunk_size or self.chunk_size
for i in range(0, len(tokens), _chunk_size):
response = await async_embed_with_retry(
self,
input=tokens[i : i + _chunk_size],
**self._invocation_params,
)
if not isinstance(response, dict):
response = response.dict()
batched_embeddings.extend(r["embedding"] for r in response["data"])
results: List[List[List[float]]] = [[] for _ in range(len(texts))]
num_tokens_in_batch: List[List[int]] = [[] for _ in range(len(texts))]
for i in range(len(indices)):
results[indices[i]].append(batched_embeddings[i])
num_tokens_in_batch[indices[i]].append(len(tokens[i]))
embeddings: List[List[float]] = [[] for _ in range(len(texts))]
for i in range(len(texts)):
_result = results[i]
if len(_result) == 0:
average_embedded = await async_embed_with_retry(
self,
input="",
**self._invocation_params,
)
if not isinstance(average_embedded, dict):
average_embedded = average_embedded.dict()
average = average_embedded["data"][0]["embedding"]
else:
average = np.average(_result, axis=0, weights=num_tokens_in_batch[i])
embeddings[i] = (average / np.linalg.norm(average)).tolist()
return embeddings
def embed_documents(
self, texts: List[str], chunk_size: Optional[int] = 0
) -> List[List[float]]:
"""Call out to OpenAI's embedding endpoint for embedding search docs.
Args:
texts: The list of texts to embed.
chunk_size: The chunk size of embeddings. If None, will use the chunk size
specified by the class.
Returns:
List of embeddings, one for each text.
"""
# NOTE: to keep things simple, we assume the list may contain texts longer
# than the maximum context and use length-safe embedding function.
engine = cast(str, self.deployment)
return self._get_len_safe_embeddings(texts, engine=engine)
async def aembed_documents(
self, texts: List[str], chunk_size: Optional[int] = 0
) -> List[List[float]]:
"""Call out to OpenAI's embedding endpoint async for embedding search docs.
Args:
texts: The list of texts to embed.
chunk_size: The chunk size of embeddings. If None, will use the chunk size
specified by the class.
Returns:
List of embeddings, one for each text.
"""
# NOTE: to keep things simple, we assume the list may contain texts longer
# than the maximum context and use length-safe embedding function.
engine = cast(str, self.deployment)
return await self._aget_len_safe_embeddings(texts, engine=engine)
def embed_query(self, text: str) -> List[float]:
"""Call out to OpenAI's embedding endpoint for embedding query text.
Args:
text: The text to embed.
Returns:
Embedding for the text.
"""
return self.embed_documents([text])[0]
async def aembed_query(self, text: str) -> List[float]:
"""Call out to OpenAI's embedding endpoint async for embedding query text.
Args:
text: The text to embed.
Returns:
Embedding for the text.
"""
embeddings = await self.aembed_documents([text])
return embeddings[0]
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/embeddings/ollama.py | import logging
from typing import Any, Dict, List, Mapping, Optional
import requests
from langchain_core._api.deprecation import deprecated
from langchain_core.embeddings import Embeddings
from pydantic import BaseModel, ConfigDict
logger = logging.getLogger(__name__)
@deprecated(
    since="0.3.1",
    removal="1.0.0",
    alternative_import="langchain_ollama.OllamaEmbeddings",
)
class OllamaEmbeddings(BaseModel, Embeddings):
    """Ollama locally runs large language models.

    To use, follow the instructions at https://ollama.ai/.

    Example:
        .. code-block:: python

            from langchain_community.embeddings import OllamaEmbeddings
            ollama_emb = OllamaEmbeddings(
                model="llama:7b",
            )
            r1 = ollama_emb.embed_documents(
                [
                    "Alpha is the first letter of Greek alphabet",
                    "Beta is the second letter of Greek alphabet",
                ]
            )
            r2 = ollama_emb.embed_query(
                "What is the second letter of Greek alphabet"
            )
    """

    base_url: str = "http://localhost:11434"
    """Base url the model is hosted under."""

    model: str = "llama2"
    """Model name to use."""

    embed_instruction: str = "passage: "
    """Instruction used to embed documents."""

    query_instruction: str = "query: "
    """Instruction used to embed the query."""

    mirostat: Optional[int] = None
    """Enable Mirostat sampling for controlling perplexity.
    (default: 0, 0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0)"""

    mirostat_eta: Optional[float] = None
    """Influences how quickly the algorithm responds to feedback
    from the generated text. A lower learning rate will result in
    slower adjustments, while a higher learning rate will make
    the algorithm more responsive. (Default: 0.1)"""

    mirostat_tau: Optional[float] = None
    """Controls the balance between coherence and diversity
    of the output. A lower value will result in more focused and
    coherent text. (Default: 5.0)"""

    num_ctx: Optional[int] = None
    """Sets the size of the context window used to generate the
    next token. (Default: 2048) """

    num_gpu: Optional[int] = None
    """The number of GPUs to use. On macOS it defaults to 1 to
    enable metal support, 0 to disable."""

    num_thread: Optional[int] = None
    """Sets the number of threads to use during computation.
    By default, Ollama will detect this for optimal performance.
    It is recommended to set this value to the number of physical
    CPU cores your system has (as opposed to the logical number of cores)."""

    repeat_last_n: Optional[int] = None
    """Sets how far back for the model to look back to prevent
    repetition. (Default: 64, 0 = disabled, -1 = num_ctx)"""

    repeat_penalty: Optional[float] = None
    """Sets how strongly to penalize repetitions. A higher value (e.g., 1.5)
    will penalize repetitions more strongly, while a lower value (e.g., 0.9)
    will be more lenient. (Default: 1.1)"""

    temperature: Optional[float] = None
    """The temperature of the model. Increasing the temperature will
    make the model answer more creatively. (Default: 0.8)"""

    stop: Optional[List[str]] = None
    """Sets the stop tokens to use."""

    tfs_z: Optional[float] = None
    """Tail free sampling is used to reduce the impact of less probable
    tokens from the output. A higher value (e.g., 2.0) will reduce the
    impact more, while a value of 1.0 disables this setting. (default: 1)"""

    top_k: Optional[int] = None
    """Reduces the probability of generating nonsense. A higher value (e.g. 100)
    will give more diverse answers, while a lower value (e.g. 10)
    will be more conservative. (Default: 40)"""

    top_p: Optional[float] = None
    """Works together with top-k. A higher value (e.g., 0.95) will lead
    to more diverse text, while a lower value (e.g., 0.5) will
    generate more focused and conservative text. (Default: 0.9)"""

    show_progress: bool = False
    """Whether to show a tqdm progress bar. Must have `tqdm` installed."""

    headers: Optional[dict] = None
    """Additional headers to pass to endpoint (e.g. Authorization, Referer).
    This is useful when Ollama is hosted on cloud services that require
    tokens for authentication.
    """

    @property
    def _default_params(self) -> Dict[str, Any]:
        """Get the default parameters for calling Ollama."""
        # The "options" keys mirror Ollama's request payload; None values are
        # forwarded as-is so the server applies its own defaults.
        return {
            "model": self.model,
            "options": {
                "mirostat": self.mirostat,
                "mirostat_eta": self.mirostat_eta,
                "mirostat_tau": self.mirostat_tau,
                "num_ctx": self.num_ctx,
                "num_gpu": self.num_gpu,
                "num_thread": self.num_thread,
                "repeat_last_n": self.repeat_last_n,
                "repeat_penalty": self.repeat_penalty,
                "temperature": self.temperature,
                "stop": self.stop,
                "tfs_z": self.tfs_z,
                "top_k": self.top_k,
                "top_p": self.top_p,
            },
        }

    model_kwargs: Optional[dict] = None
    """Other model keyword args"""

    @property
    def _identifying_params(self) -> Mapping[str, Any]:
        """Get the identifying parameters."""
        return {**{"model": self.model}, **self._default_params}

    model_config = ConfigDict(extra="forbid", protected_namespaces=())

    def _process_emb_response(self, input: str) -> List[float]:
        """POST a single prompt to Ollama's ``/api/embeddings`` endpoint.

        Args:
            input: The prompt/document text to embed.

        Returns:
            The embedding vector for ``input``.

        Raises:
            ValueError: If the request fails, the server returns a non-200
                status code, or the response body is not valid JSON.
        """
        headers = {
            "Content-Type": "application/json",
            **(self.headers or {}),
        }
        # NOTE(review): no timeout is set on this request, so a hung server
        # blocks indefinitely — consider adding one.
        try:
            res = requests.post(
                f"{self.base_url}/api/embeddings",
                headers=headers,
                json={"model": self.model, "prompt": input, **self._default_params},
            )
        except requests.exceptions.RequestException as e:
            raise ValueError(f"Error raised by inference endpoint: {e}")
        if res.status_code != 200:
            raise ValueError(
                "Error raised by inference API HTTP code: %s, %s"
                % (res.status_code, res.text)
            )
        try:
            t = res.json()
            return t["embedding"]
        except requests.exceptions.JSONDecodeError as e:
            raise ValueError(
                f"Error raised by inference API: {e}.\nResponse: {res.text}"
            )

    def _embed(self, input: List[str]) -> List[List[float]]:
        """Embed each prompt in ``input``, optionally showing a progress bar."""
        if self.show_progress:
            try:
                from tqdm import tqdm

                iter_ = tqdm(input, desc="OllamaEmbeddings")
            except ImportError:
                # Degrade gracefully: embed without a progress bar.
                logger.warning(
                    "Unable to show progress bar because tqdm could not be imported. "
                    "Please install with `pip install tqdm`."
                )
                iter_ = input
        else:
            iter_ = input
        return [self._process_emb_response(prompt) for prompt in iter_]

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Embed documents using an Ollama deployed embedding model.

        Args:
            texts: The list of texts to embed.

        Returns:
            List of embeddings, one for each text.
        """
        # Each document is prefixed with embed_instruction before embedding.
        instruction_pairs = [f"{self.embed_instruction}{text}" for text in texts]
        embeddings = self._embed(instruction_pairs)
        return embeddings

    def embed_query(self, text: str) -> List[float]:
        """Embed a query using a Ollama deployed embedding model.

        Args:
            text: The text to embed.

        Returns:
            Embeddings for the text.
        """
        # Queries use a different instruction prefix than documents.
        instruction_pair = f"{self.query_instruction}{text}"
        embedding = self._embed([instruction_pair])[0]
        return embedding
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/embeddings/titan_takeoff.py | from enum import Enum
from typing import Any, Dict, List, Optional, Set, Union
from langchain_core.embeddings import Embeddings
from pydantic import BaseModel, ConfigDict
class TakeoffEmbeddingException(Exception):
    """Base exception for errors raised while talking to Takeoff embeddings."""
class MissingConsumerGroup(TakeoffEmbeddingException):
    """Raised when no consumer group can be determined, either from the
    TitanTakeoffEmbed initialization or from the embed request itself."""
class Device(str, Enum):
    """Inference device selector: ``cuda`` for GPU, ``cpu`` for CPU."""

    cuda = "cuda"
    cpu = "cpu"
class ReaderConfig(BaseModel):
    """Configuration for the reader to be deployed in Takeoff."""

    # protected_namespaces=() lets the "model_"-prefixed field below coexist
    # with pydantic's reserved namespace.
    model_config = ConfigDict(
        protected_namespaces=(),
    )

    model_name: str
    """The name of the model to use"""
    device: Device = Device.cuda
    """The device to use for inference, cuda or cpu"""
    consumer_group: str = "primary"
    """The consumer group to place the reader into"""
class TitanTakeoffEmbed(Embeddings):
    """Interface with Takeoff Inference API for embedding models.

    Use it to send embedding requests and to deploy embedding
    readers with Takeoff.

    Examples:
        This is an example how to deploy an embedding model and send requests.

        .. code-block:: python

            # Import the TitanTakeoffEmbed class from community package
            import time
            from langchain_community.embeddings import TitanTakeoffEmbed

            # Specify the embedding reader you'd like to deploy
            reader_1 = {
                "model_name": "avsolatorio/GIST-large-Embedding-v0",
                "device": "cpu",
                "consumer_group": "embed"
            }

            # For every reader you pass into models arg Takeoff will spin up a reader
            # according to the specs you provide. If you don't specify the arg no models
            # are spun up and it assumes you have already done this separately.
            embed = TitanTakeoffEmbed(models=[reader_1])

            # Wait for the reader to be deployed, time needed depends on the model size
            # and your internet speed
            time.sleep(60)

            # Returns the embedded query, ie a List[float], sent to `embed` consumer
            # group where we just spun up the embedding reader
            print(embed.embed_query(
                "Where can I see football?", consumer_group="embed"
            ))

            # Returns a List of embeddings, ie a List[List[float]], sent to `embed`
            # consumer group where we just spun up the embedding reader
            print(embed.embed_document(
                ["Document1", "Document2"],
                consumer_group="embed"
            ))
    """

    base_url: str = "http://localhost"
    """The base URL of the Titan Takeoff (Pro) server. Default = "http://localhost"."""

    port: int = 3000
    """The port of the Titan Takeoff (Pro) server. Default = 3000."""

    mgmt_port: int = 3001
    """The management port of the Titan Takeoff (Pro) server. Default = 3001."""

    client: Any = None
    """Takeoff Client Python SDK used to interact with Takeoff API"""

    embed_consumer_groups: Set[str] = set()
    """The consumer groups in Takeoff which contain embedding models"""

    def __init__(
        self,
        base_url: str = "http://localhost",
        port: int = 3000,
        mgmt_port: int = 3001,
        models: Optional[List[ReaderConfig]] = None,
    ):
        """Initialize the Titan Takeoff embedding wrapper.

        Args:
            base_url (str, optional): The base url where Takeoff Inference Server is
                listening. Defaults to "http://localhost".
            port (int, optional): What port is Takeoff Inference API listening on.
                Defaults to 3000.
            mgmt_port (int, optional): What port is Takeoff Management API listening on.
                Defaults to 3001.
            models (Optional[List[ReaderConfig]], optional): Any readers you'd like to
                spin up on. Defaults to None, meaning no readers are deployed.

        Raises:
            ImportError: If you haven't installed takeoff-client, you will get an
                ImportError. To remedy run `pip install 'takeoff-client==0.4.0'`
        """
        self.base_url = base_url
        self.port = port
        self.mgmt_port = mgmt_port
        # Use a per-instance set: the class-level default is shared between
        # all instances, so mutating it directly would leak consumer groups
        # across unrelated TitanTakeoffEmbed objects.
        self.embed_consumer_groups = set()
        try:
            from takeoff_client import TakeoffClient
        except ImportError:
            raise ImportError(
                "takeoff-client is required for TitanTakeoff. "
                "Please install it with `pip install 'takeoff-client==0.4.0'`."
            )
        self.client = TakeoffClient(
            self.base_url, port=self.port, mgmt_port=self.mgmt_port
        )
        # `models=None` (rather than a mutable `[]` default) avoids the shared
        # mutable default-argument pitfall.
        for model in models or []:
            self.client.create_reader(model)
            if isinstance(model, dict):
                self.embed_consumer_groups.add(model.get("consumer_group"))
            else:
                self.embed_consumer_groups.add(model.consumer_group)
        super().__init__()

    def _embed(
        self, input: Union[List[str], str], consumer_group: Optional[str]
    ) -> Dict[str, Any]:
        """Embed text.

        Args:
            input (Union[List[str], str]): prompt/document or list of prompts/documents
                to embed
            consumer_group (Optional[str]): what consumer group to send the embedding
                request to. If not specified and there is only one
                consumer group specified during initialization, it will be used. If there
                are multiple consumer groups specified during initialization, you must
                specify which one to use.

        Raises:
            MissingConsumerGroup: The consumer group can not be inferred from the
                initialization and must be specified with request.

        Returns:
            Dict[str, Any]: Result of query, {"result": List[List[float]]} or
            {"result": List[float]}
        """
        if not consumer_group:
            # Infer the consumer group only when the choice is unambiguous.
            if len(self.embed_consumer_groups) == 1:
                consumer_group = list(self.embed_consumer_groups)[0]
            elif len(self.embed_consumer_groups) > 1:
                raise MissingConsumerGroup(
                    "TakeoffEmbedding was initialized with multiple embedding reader "
                    "groups, you must specify which one to use."
                )
            else:
                raise MissingConsumerGroup(
                    "You must specify what consumer group you want to send embedding "
                    "response to as TitanTakeoffEmbed was not initialized with an "
                    "embedding reader."
                )
        return self.client.embed(input, consumer_group)

    def embed_documents(
        self, texts: List[str], consumer_group: Optional[str] = None
    ) -> List[List[float]]:
        """Embed documents.

        Args:
            texts (List[str]): List of prompts/documents to embed
            consumer_group (Optional[str], optional): Consumer group to send request
                to containing embedding model. Defaults to None.

        Returns:
            List[List[float]]: List of embeddings
        """
        return self._embed(texts, consumer_group)["result"]

    def embed_query(
        self, text: str, consumer_group: Optional[str] = None
    ) -> List[float]:
        """Embed query.

        Args:
            text (str): Prompt/document to embed
            consumer_group (Optional[str], optional): Consumer group to send request
                to containing embedding model. Defaults to None.

        Returns:
            List[float]: Embedding
        """
        return self._embed(text, consumer_group)["result"]
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/embeddings/tensorflow_hub.py | from typing import Any, List
from langchain_core.embeddings import Embeddings
from pydantic import BaseModel, ConfigDict
# Default TF-Hub module: Universal Sentence Encoder, multilingual, v3.
DEFAULT_MODEL_URL = "https://tfhub.dev/google/universal-sentence-encoder-multilingual/3"
class TensorflowHubEmbeddings(BaseModel, Embeddings):
    """TensorflowHub embedding models.

    To use, you should have the ``tensorflow_text`` python package installed.

    Example:
        .. code-block:: python

            from langchain_community.embeddings import TensorflowHubEmbeddings
            url = "https://tfhub.dev/google/universal-sentence-encoder-multilingual/3"
            tf = TensorflowHubEmbeddings(model_url=url)
    """

    embed: Any = None  #: :meta private:
    model_url: str = DEFAULT_MODEL_URL
    """Model name to use."""

    def __init__(self, **kwargs: Any):
        """Initialize the tensorflow_hub and tensorflow_text."""
        super().__init__(**kwargs)
        try:
            import tensorflow_hub
        except ImportError:
            raise ImportError(
                "Could not import tensorflow-hub python package. "
                "Please install it with `pip install tensorflow-hub`."
            )
        try:
            import tensorflow_text  # noqa
        except ImportError:
            raise ImportError(
                "Could not import tensorflow_text python package. "
                "Please install it with `pip install tensorflow-text`."
            )
        # Load the TF-Hub module once at construction time; `embed` is a
        # callable mapping a batch of strings to a tensor of embeddings.
        self.embed = tensorflow_hub.load(self.model_url)

    model_config = ConfigDict(
        extra="forbid",
        protected_namespaces=(),
    )

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Compute doc embeddings using a TensorflowHub embedding model.

        Args:
            texts: The list of texts to embed.

        Returns:
            List of embeddings, one for each text.
        """
        # Strip newlines; sentence encoders behave better on single-line input.
        texts = list(map(lambda x: x.replace("\n", " "), texts))
        embeddings = self.embed(texts).numpy()
        return embeddings.tolist()

    def embed_query(self, text: str) -> List[float]:
        """Compute query embeddings using a TensorflowHub embedding model.

        Args:
            text: The text to embed.

        Returns:
            Embeddings for the text.
        """
        text = text.replace("\n", " ")
        embedding = self.embed([text]).numpy()[0]
        return embedding.tolist()
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/embeddings/yandex.py | """Wrapper around YandexGPT embedding models."""
from __future__ import annotations
import logging
import time
from typing import Any, Callable, Dict, List, Sequence
from langchain_core.embeddings import Embeddings
from langchain_core.utils import convert_to_secret_str, get_from_dict_or_env, pre_init
from pydantic import BaseModel, ConfigDict, Field, SecretStr
from tenacity import (
before_sleep_log,
retry,
retry_if_exception_type,
stop_after_attempt,
wait_exponential,
)
logger = logging.getLogger(__name__)
class YandexGPTEmbeddings(BaseModel, Embeddings):
    """YandexGPT Embeddings models.

    To use, you should have the ``yandexcloud`` python package installed.

    There are two authentication options for the service account
    with the ``ai.languageModels.user`` role:
        - You can specify the token in a constructor parameter `iam_token`
          or in an environment variable `YC_IAM_TOKEN`.
        - You can specify the key in a constructor parameter `api_key`
          or in an environment variable `YC_API_KEY`.

    To use the default model specify the folder ID in a parameter `folder_id`
    or in an environment variable `YC_FOLDER_ID`.

    Example:
        .. code-block:: python

            from langchain_community.embeddings.yandex import YandexGPTEmbeddings
            embeddings = YandexGPTEmbeddings(iam_token="t1.9eu...", folder_id=<folder-id>)
    """  # noqa: E501

    iam_token: SecretStr = ""  # type: ignore[assignment]
    """Yandex Cloud IAM token for service account
    with the `ai.languageModels.user` role"""
    api_key: SecretStr = ""  # type: ignore[assignment]
    """Yandex Cloud Api Key for service account
    with the `ai.languageModels.user` role"""
    model_uri: str = Field(default="", alias="query_model_uri")
    """Query model uri to use."""
    doc_model_uri: str = ""
    """Doc model uri to use."""
    folder_id: str = ""
    """Yandex Cloud folder ID"""
    doc_model_name: str = "text-search-doc"
    """Doc model name to use."""
    model_name: str = Field(default="text-search-query", alias="query_model_name")
    """Query model name to use."""
    model_version: str = "latest"
    """Model version to use."""
    url: str = "llm.api.cloud.yandex.net:443"
    """The url of the API."""
    max_retries: int = 6
    """Maximum number of retries to make when generating."""
    sleep_interval: float = 0.0
    """Delay between API requests"""
    disable_request_logging: bool = False
    """YandexGPT API logs all request data by default.
    If you provide personal data, confidential information, disable logging."""
    # gRPC call metadata (auth header etc.); populated by validate_environment.
    grpc_metadata: Sequence

    model_config = ConfigDict(populate_by_name=True, protected_namespaces=())

    @pre_init
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that iam token exists in environment."""
        # Pull credentials from constructor args or environment variables,
        # wrapping them as SecretStr so they are not echoed in reprs/logs.
        iam_token = convert_to_secret_str(
            get_from_dict_or_env(values, "iam_token", "YC_IAM_TOKEN", "")
        )
        values["iam_token"] = iam_token
        api_key = convert_to_secret_str(
            get_from_dict_or_env(values, "api_key", "YC_API_KEY", "")
        )
        values["api_key"] = api_key
        folder_id = get_from_dict_or_env(values, "folder_id", "YC_FOLDER_ID", "")
        values["folder_id"] = folder_id
        if api_key.get_secret_value() == "" and iam_token.get_secret_value() == "":
            raise ValueError("Either 'YC_API_KEY' or 'YC_IAM_TOKEN' must be provided.")
        # The IAM token takes precedence over the API key when both are set.
        if values["iam_token"]:
            values["grpc_metadata"] = [
                ("authorization", f"Bearer {values['iam_token'].get_secret_value()}")
            ]
            if values["folder_id"]:
                values["grpc_metadata"].append(("x-folder-id", values["folder_id"]))
        else:
            values["grpc_metadata"] = [
                ("authorization", f"Api-Key {values['api_key'].get_secret_value()}"),
            ]
        # Derive default model URIs from folder_id when not given explicitly.
        if not values.get("doc_model_uri"):
            if values["folder_id"] == "":
                raise ValueError("'doc_model_uri' or 'folder_id' must be provided.")
            values["doc_model_uri"] = (
                f"emb://{values['folder_id']}/{values['doc_model_name']}/{values['model_version']}"
            )
        if not values.get("model_uri"):
            if values["folder_id"] == "":
                raise ValueError("'model_uri' or 'folder_id' must be provided.")
            values["model_uri"] = (
                f"emb://{values['folder_id']}/{values['model_name']}/{values['model_version']}"
            )
        if values["disable_request_logging"]:
            values["grpc_metadata"].append(
                (
                    "x-data-logging-enabled",
                    "false",
                )
            )
        return values

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Embed documents using a YandexGPT embeddings models.

        Args:
            texts: The list of texts to embed.

        Returns:
            List of embeddings, one for each text.
        """
        return _embed_with_retry(self, texts=texts)

    def embed_query(self, text: str) -> List[float]:
        """Embed a query using a YandexGPT embeddings models.

        Args:
            text: The text to embed.

        Returns:
            Embeddings for the text.
        """
        return _embed_with_retry(self, texts=[text], embed_query=True)[0]
def _create_retry_decorator(llm: YandexGPTEmbeddings) -> Callable[[Any], Any]:
    """Build a tenacity retry decorator tuned to *llm*'s retry settings."""
    from grpc import RpcError

    # Exponential backoff bounded between 1 and 60 seconds.
    wait_min, wait_max = 1, 60
    return retry(
        reraise=True,
        stop=stop_after_attempt(llm.max_retries),
        wait=wait_exponential(multiplier=1, min=wait_min, max=wait_max),
        retry=retry_if_exception_type(RpcError),
        before_sleep=before_sleep_log(logger, logging.WARNING),
    )
def _embed_with_retry(llm: YandexGPTEmbeddings, **kwargs: Any) -> Any:
    """Invoke :func:`_make_request` under the llm's retry policy."""
    retrying = _create_retry_decorator(llm)

    @retrying
    def _invoke(**_kwargs: Any) -> Any:
        return _make_request(llm, **_kwargs)

    return _invoke(**kwargs)
def _make_request(self: YandexGPTEmbeddings, texts: List[str], **kwargs):  # type: ignore[no-untyped-def]
    """Embed ``texts`` via the YandexGPT gRPC embeddings service.

    Makes one gRPC call per text, sleeping ``self.sleep_interval`` between
    requests. Pass ``embed_query=True`` in kwargs to use the query model
    instead of the document model.

    Returns:
        A list of embedding vectors (one list of floats per input text).

    Raises:
        ImportError: If the ``yandexcloud`` SDK (or ``grpc``) is missing.
    """
    try:
        import grpc

        try:
            from yandex.cloud.ai.foundation_models.v1.embedding.embedding_service_pb2 import (  # noqa: E501
                TextEmbeddingRequest,
            )
            from yandex.cloud.ai.foundation_models.v1.embedding.embedding_service_pb2_grpc import (  # noqa: E501
                EmbeddingsServiceStub,
            )
        except ModuleNotFoundError:
            # Older yandexcloud SDKs expose the same messages under a
            # different module path.
            from yandex.cloud.ai.foundation_models.v1.foundation_models_service_pb2 import (  # noqa: E501
                TextEmbeddingRequest,
            )
            from yandex.cloud.ai.foundation_models.v1.foundation_models_service_pb2_grpc import (  # noqa: E501
                EmbeddingsServiceStub,
            )
    except ImportError as e:
        raise ImportError(
            "Please install YandexCloud SDK with `pip install yandexcloud` \
            or upgrade it to recent version."
        ) from e
    result = []
    channel_credentials = grpc.ssl_channel_credentials()
    # Use the query model if embed_query is True, else the document model.
    if kwargs.get("embed_query"):
        model_uri = self.model_uri
    else:
        model_uri = self.doc_model_uri
    # Close the channel deterministically instead of leaking it; the stub is
    # created once and reused across requests.
    with grpc.secure_channel(self.url, channel_credentials) as channel:
        stub = EmbeddingsServiceStub(channel)
        for text in texts:
            request = TextEmbeddingRequest(model_uri=model_uri, text=text)
            res = stub.TextEmbedding(request, metadata=self.grpc_metadata)  # type: ignore[attr-defined]
            result.append(list(res.embedding))
            time.sleep(self.sleep_interval)
    return result
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/embeddings/fastembed.py | import importlib
import importlib.metadata
from typing import Any, Dict, List, Literal, Optional
import numpy as np
from langchain_core.embeddings import Embeddings
from langchain_core.utils import pre_init
from pydantic import BaseModel, ConfigDict
# Minimum supported fastembed release; enforced in validate_environment.
MIN_VERSION = "0.2.0"
class FastEmbedEmbeddings(BaseModel, Embeddings):
    """Qdrant FastEmbedding models.

    FastEmbed is a lightweight, fast, Python library built for embedding generation.
    See more documentation at:
    * https://github.com/qdrant/fastembed/
    * https://qdrant.github.io/fastembed/

    To use this class, you must install the `fastembed` Python package.

    `pip install fastembed`
    Example:
        from langchain_community.embeddings import FastEmbedEmbeddings
        fastembed = FastEmbedEmbeddings()
    """

    model_name: str = "BAAI/bge-small-en-v1.5"
    """Name of the FastEmbedding model to use
    Defaults to "BAAI/bge-small-en-v1.5"
    Find the list of supported models at
    https://qdrant.github.io/fastembed/examples/Supported_Models/
    """

    max_length: int = 512
    """The maximum number of tokens. Defaults to 512.
    Unknown behavior for values > 512.
    """

    cache_dir: Optional[str] = None
    """The path to the cache directory.
    Defaults to `local_cache` in the parent directory
    """

    threads: Optional[int] = None
    """The number of threads single onnxruntime session can use.
    Defaults to None
    """

    doc_embed_type: Literal["default", "passage"] = "default"
    """Type of embedding to use for documents
    The available options are: "default" and "passage"
    """

    batch_size: int = 256
    """Batch size for encoding. Higher values will use more memory, but be faster.
    Defaults to 256.
    """

    parallel: Optional[int] = None
    """If `>1`, parallel encoding is used, recommended for encoding of large datasets.
    If `0`, use all available cores.
    If `None`, don't use data-parallel processing, use default onnxruntime threading.
    Defaults to `None`.
    """

    model: Any = None  # : :meta private:

    model_config = ConfigDict(extra="allow", protected_namespaces=())

    @pre_init
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that FastEmbed is installed and instantiate the model.

        Raises:
            ImportError: If `fastembed` is missing or older than MIN_VERSION.
        """
        model_name = values.get("model_name")
        max_length = values.get("max_length")
        cache_dir = values.get("cache_dir")
        threads = values.get("threads")

        try:
            fastembed = importlib.import_module("fastembed")
        except ModuleNotFoundError:
            raise ImportError(
                "Could not import 'fastembed' Python package. "
                "Please install it with `pip install fastembed`."
            )

        def _release(version: str) -> tuple:
            # Leading numeric components of a version string as an int tuple.
            # Comparing these (instead of raw strings) makes e.g. "0.10.0"
            # correctly sort after "0.2.0" — plain string comparison would
            # wrongly reject newer releases — without needing `packaging`.
            nums = []
            for part in version.split("."):
                if not part.isdigit():
                    break
                nums.append(int(part))
            return tuple(nums)

        if _release(importlib.metadata.version("fastembed")) < _release(MIN_VERSION):
            raise ImportError(
                'FastEmbedEmbeddings requires `pip install -U "fastembed>=0.2.0"`.'
            )

        values["model"] = fastembed.TextEmbedding(
            model_name=model_name,
            max_length=max_length,
            cache_dir=cache_dir,
            threads=threads,
        )
        return values

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Generate embeddings for documents using FastEmbed.

        Args:
            texts: The list of texts to embed.

        Returns:
            List of embeddings, one for each text.
        """
        embeddings: List[np.ndarray]
        if self.doc_embed_type == "passage":
            embeddings = self.model.passage_embed(
                texts, batch_size=self.batch_size, parallel=self.parallel
            )
        else:
            embeddings = self.model.embed(
                texts, batch_size=self.batch_size, parallel=self.parallel
            )
        return [e.tolist() for e in embeddings]

    def embed_query(self, text: str) -> List[float]:
        """Generate query embeddings using FastEmbed.

        Args:
            text: The text to embed.

        Returns:
            Embeddings for the text.
        """
        query_embeddings: np.ndarray = next(
            self.model.query_embed(
                text, batch_size=self.batch_size, parallel=self.parallel
            )
        )
        return query_embeddings.tolist()
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/embeddings/spacy_embeddings.py | import importlib.util
from typing import Any, Dict, List, Optional
from langchain_core.embeddings import Embeddings
from pydantic import BaseModel, ConfigDict, model_validator
class SpacyEmbeddings(BaseModel, Embeddings):
"""Embeddings by spaCy models.
Attributes:
model_name (str): Name of a spaCy model.
nlp (Any): The spaCy model loaded into memory.
Methods:
embed_documents(texts: List[str]) -> List[List[float]]:
Generates embeddings for a list of documents.
embed_query(text: str) -> List[float]:
Generates an embedding for a single piece of text.
"""
model_name: str = "en_core_web_sm"
nlp: Optional[Any] = None
model_config = ConfigDict(extra="forbid", protected_namespaces=())
@model_validator(mode="before")
@classmethod
def validate_environment(cls, values: Dict) -> Any:
"""
Validates that the spaCy package and the model are installed.
Args:
values (Dict): The values provided to the class constructor.
Returns:
The validated values.
Raises:
ValueError: If the spaCy package or the
model are not installed.
"""
if values.get("model_name") is None:
values["model_name"] = "en_core_web_sm"
model_name = values.get("model_name")
# Check if the spaCy package is installed
if importlib.util.find_spec("spacy") is None:
raise ValueError(
"SpaCy package not found. "
"Please install it with `pip install spacy`."
)
try:
# Try to load the spaCy model
import spacy
values["nlp"] = spacy.load(model_name) # type: ignore[arg-type]
except OSError:
# If the model is not found, raise a ValueError
raise ValueError(
f"SpaCy model '{model_name}' not found. "
f"Please install it with"
f" `python -m spacy download {model_name}`"
"or provide a valid spaCy model name."
)
return values # Return the validated values
def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""
Generates embeddings for a list of documents.
Args:
texts (List[str]): The documents to generate embeddings for.
Returns:
A list of embeddings, one for each document.
"""
return [self.nlp(text).vector.tolist() for text in texts] # type: ignore[misc]
def embed_query(self, text: str) -> List[float]:
"""
Generates an embedding for a single piece of text.
Args:
text (str): The text to generate an embedding for.
Returns:
The embedding for the text.
"""
return self.nlp(text).vector.tolist() # type: ignore[misc]
    async def aembed_documents(self, texts: List[str]) -> List[List[float]]:
        """Asynchronously generate embeddings for a list of documents.

        Async embedding is not supported by this integration; this method
        always raises.

        Args:
            texts (List[str]): The documents to generate embeddings for.

        Raises:
            NotImplementedError: Always; async generation is unsupported.
        """
        raise NotImplementedError("Asynchronous embedding generation is not supported.")
    async def aembed_query(self, text: str) -> List[float]:
        """Asynchronously generate an embedding for a single piece of text.

        Async embedding is not supported by this integration; this method
        always raises.

        Args:
            text (str): The text to generate an embedding for.

        Raises:
            NotImplementedError: Always; async generation is unsupported.
        """
        raise NotImplementedError("Asynchronous embedding generation is not supported.")
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/embeddings/oracleai.py | # Authors:
# Harichandan Roy (hroy)
# David Jiang (ddjiang)
#
# -----------------------------------------------------------------------------
# oracleai.py
# -----------------------------------------------------------------------------
from __future__ import annotations
import json
import logging
import traceback
from typing import TYPE_CHECKING, Any, Dict, List, Optional
from langchain_core.embeddings import Embeddings
from pydantic import BaseModel, ConfigDict
if TYPE_CHECKING:
from oracledb import Connection
logger = logging.getLogger(__name__)
"""OracleEmbeddings class"""
class OracleEmbeddings(BaseModel, Embeddings):
    """Get embeddings from an Oracle Database embedding provider.

    Required database privileges:

    1 - user needs to have create procedure,
        create mining model, create any directory privilege.
    2 - grant create procedure, create mining model,
        create any directory to <user>;
    """

    # Oracle Connection (oracledb.Connection); supplied via constructor kwargs.
    conn: Any = None
    # Embedding parameters passed to dbms_vector_chain.utl_to_embeddings.
    params: Dict[str, Any]
    # Optional HTTP proxy the database should use for provider calls.
    proxy: Optional[str] = None

    def __init__(self, **kwargs: Any):
        super().__init__(**kwargs)

    model_config = ConfigDict(
        extra="forbid",
    )

    @staticmethod
    def load_onnx_model(
        conn: Connection, dir: str, onnx_file: str, model_name: str
    ) -> None:
        """Load an ONNX model to Oracle Database.

        Args:
            conn: Oracle Connection,
            dir: Oracle Directory,
            onnx_file: ONNX file name,
            model_name: Name of the model.

        Raises:
            Exception: If any argument is None or the database call fails.
        """
        # Validate before opening a cursor: the original code referenced the
        # cursor in its except block even when conn.cursor() had never run,
        # which masked the real error with a NameError.
        if conn is None or dir is None or onnx_file is None or model_name is None:
            raise Exception("Invalid input")
        cursor = conn.cursor()
        try:
            cursor.execute(
                """
                begin
                    dbms_data_mining.drop_model(model_name => :model, force => true);
                    SYS.DBMS_VECTOR.load_onnx_model(:path, :filename, :model,
                        json('{"function" : "embedding",
                            "embeddingOutput" : "embedding",
                            "input": {"input": ["DATA"]}}'));
                end;""",
                path=dir,
                filename=onnx_file,
                model=model_name,
            )
        except Exception as ex:
            logger.info(f"An exception occurred :: {ex}")
            traceback.print_exc()
            raise
        finally:
            # Release the cursor on success and failure alike.
            cursor.close()

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Compute doc embeddings using an OracleEmbeddings.

        Args:
            texts: The list of texts to embed.

        Returns:
            List of embeddings, one for each input text. Returns ``None``
            when ``texts`` is ``None`` (preserved for backward compatibility).
        """
        try:
            import oracledb
        except ImportError as e:
            raise ImportError(
                "Unable to import oracledb, please install with "
                "`pip install -U oracledb`."
            ) from e

        if texts is None:
            return None

        embeddings: List[List[float]] = []
        # Fetch LOB columns as str/bytes instead of locator objects.
        oracledb.defaults.fetch_lobs = False
        cursor = self.conn.cursor()
        try:
            if self.proxy:
                cursor.execute(
                    "begin utl_http.set_proxy(:proxy); end;", proxy=self.proxy
                )
            chunks = []
            for i, text in enumerate(texts, start=1):
                chunk = {"chunk_id": i, "chunk_data": text}
                chunks.append(json.dumps(chunk))

            vector_array_type = self.conn.gettype("SYS.VECTOR_ARRAY_T")
            inputs = vector_array_type.newobject(chunks)
            cursor.execute(
                "select t.* "
                + "from dbms_vector_chain.utl_to_embeddings(:content, "
                + "json(:params)) t",
                content=inputs,
                params=json.dumps(self.params),
            )
            for row in cursor:
                if row is None:
                    embeddings.append([])
                else:
                    rdata = json.loads(row[0])
                    # embed_vector is itself a JSON-encoded array string;
                    # dereference it into a list of floats.
                    vec = json.loads(rdata["embed_vector"])
                    embeddings.append(vec)
        except Exception as ex:
            logger.info(f"An exception occurred :: {ex}")
            traceback.print_exc()
            raise
        finally:
            # try/finally replaces the original pattern that could raise a
            # NameError from the except block when cursor creation failed.
            cursor.close()
        return embeddings

    def embed_query(self, text: str) -> List[float]:
        """Compute query embedding using an OracleEmbeddings.

        Args:
            text: The text to embed.

        Returns:
            Embedding for the text.
        """
        return self.embed_documents([text])[0]
# Manual integration test: uncomment the block below and fill in real
# credentials to exercise OracleEmbeddings against a live database.
"""
# A sample unit test.
import oracledb
# get the Oracle connection
conn = oracledb.connect(
    user="<user>",
    password="<password>",
    dsn="<hostname>/<service_name>",
)
print("Oracle connection is established...")
# params
embedder_params = {"provider": "database", "model": "demo_model"}
proxy = ""
# instance
embedder = OracleEmbeddings(conn=conn, params=embedder_params, proxy=proxy)
docs = ["hello world!", "hi everyone!", "greetings!"]
embeds = embedder.embed_documents(docs)
print(f"Total Embeddings: {len(embeds)}")
print(f"Embedding generated by OracleEmbeddings: {embeds[0]}\n")
embed = embedder.embed_query("Hello World!")
print(f"Embedding generated by OracleEmbeddings: {embed}")
conn.close()
print("Connection is closed.")
"""
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/embeddings/gigachat.py | from __future__ import annotations
import logging
from functools import cached_property
from typing import Any, Dict, List, Optional
from langchain_core._api.deprecation import deprecated
from langchain_core.embeddings import Embeddings
from langchain_core.utils import pre_init
from langchain_core.utils.pydantic import get_fields
from pydantic import BaseModel
logger = logging.getLogger(__name__)
MAX_BATCH_SIZE_CHARS = 1000000
MAX_BATCH_SIZE_PARTS = 90
@deprecated(
    since="0.3.5",
    removal="1.0",
    alternative_import="langchain_gigachat.GigaChatEmbeddings",
)
class GigaChatEmbeddings(BaseModel, Embeddings):
    """GigaChat Embeddings models.

    Example:
        .. code-block:: python

            from langchain_community.embeddings.gigachat import GigaChatEmbeddings

            embeddings = GigaChatEmbeddings(
                credentials=..., scope=..., verify_ssl_certs=False
            )
    """

    base_url: Optional[str] = None
    """ Base API URL """
    auth_url: Optional[str] = None
    """ Auth URL """
    credentials: Optional[str] = None
    """ Auth Token """
    scope: Optional[str] = None
    """ Permission scope for access token """

    access_token: Optional[str] = None
    """ Access token for GigaChat """

    model: Optional[str] = None
    """Model name to use."""
    user: Optional[str] = None
    """ Username for authenticate """
    password: Optional[str] = None
    """ Password for authenticate """

    timeout: Optional[float] = 600
    """ Timeout for request. By default it works for long requests. """
    verify_ssl_certs: Optional[bool] = None
    """ Check certificates for all requests """

    ca_bundle_file: Optional[str] = None
    cert_file: Optional[str] = None
    key_file: Optional[str] = None
    key_file_password: Optional[str] = None
    # Support for connection to GigaChat through SSL certificates

    @cached_property
    def _client(self) -> Any:
        """Returns GigaChat API client"""
        import gigachat

        return gigachat.GigaChat(
            base_url=self.base_url,
            auth_url=self.auth_url,
            credentials=self.credentials,
            scope=self.scope,
            access_token=self.access_token,
            model=self.model,
            user=self.user,
            password=self.password,
            timeout=self.timeout,
            verify_ssl_certs=self.verify_ssl_certs,
            ca_bundle_file=self.ca_bundle_file,
            cert_file=self.cert_file,
            key_file=self.key_file,
            key_file_password=self.key_file_password,
        )

    @pre_init
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate authenticate data in environment and python package is installed."""
        try:
            import gigachat  # noqa: F401
        except ImportError:
            raise ImportError(
                "Could not import gigachat python package. "
                "Please install it with `pip install gigachat`."
            )
        fields = set(get_fields(cls).keys())
        diff = set(values.keys()) - fields
        if diff:
            logger.warning(f"Extra fields {diff} in GigaChat class")
        return values

    @staticmethod
    def _make_batches(texts: List[str]) -> List[List[str]]:
        """Group texts into request batches that respect the API limits.

        A batch never contains more than MAX_BATCH_SIZE_PARTS texts nor more
        than MAX_BATCH_SIZE_CHARS characters in total, except when a single
        text is itself longer than the character limit; it is then sent alone
        so the API can report the error for that text.
        """
        batches: List[List[str]] = []
        batch: List[str] = []
        batch_chars = 0
        for text in texts:
            # Flush the current batch *before* adding a text that would
            # overflow either limit. (The previous implementation flushed
            # only after overflowing, producing over-limit requests of up
            # to 91 parts or more than MAX_BATCH_SIZE_CHARS characters.)
            if batch and (
                batch_chars + len(text) > MAX_BATCH_SIZE_CHARS
                or len(batch) >= MAX_BATCH_SIZE_PARTS
            ):
                batches.append(batch)
                batch = []
                batch_chars = 0
            batch.append(text)
            batch_chars += len(text)
        if batch:
            batches.append(batch)
        return batches

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Embed documents using a GigaChat embeddings models.

        Args:
            texts: The list of texts to embed.

        Returns:
            List of embeddings, one for each text.
        """
        embed_kwargs = {}
        if self.model is not None:
            embed_kwargs["model"] = self.model
        result: List[List[float]] = []
        for batch in self._make_batches(texts):
            response = self._client.embeddings(texts=batch, **embed_kwargs)
            result.extend(item.embedding for item in response.data)
        return result

    async def aembed_documents(self, texts: List[str]) -> List[List[float]]:
        """Embed documents using a GigaChat embeddings models.

        Args:
            texts: The list of texts to embed.

        Returns:
            List of embeddings, one for each text.
        """
        embed_kwargs = {}
        if self.model is not None:
            embed_kwargs["model"] = self.model
        result: List[List[float]] = []
        for batch in self._make_batches(texts):
            response = await self._client.aembeddings(texts=batch, **embed_kwargs)
            result.extend(item.embedding for item in response.data)
        return result

    def embed_query(self, text: str) -> List[float]:
        """Embed a query using a GigaChat embeddings models.

        Args:
            text: The text to embed.

        Returns:
            Embeddings for the text.
        """
        return self.embed_documents(texts=[text])[0]

    async def aembed_query(self, text: str) -> List[float]:
        """Embed a query using a GigaChat embeddings models.

        Args:
            text: The text to embed.

        Returns:
            Embeddings for the text.
        """
        docs = await self.aembed_documents(texts=[text])
        return docs[0]
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/embeddings/premai.py | from __future__ import annotations
import logging
from typing import Any, Callable, Dict, List, Optional, Union
from langchain_core.embeddings import Embeddings
from langchain_core.language_models.llms import create_base_retry_decorator
from langchain_core.utils import get_from_dict_or_env, pre_init
from pydantic import BaseModel, SecretStr
logger = logging.getLogger(__name__)
class PremAIEmbeddings(BaseModel, Embeddings):
    """Prem's Embedding APIs"""

    project_id: int
    """The project ID in which the experiments or deployments are carried out.
    You can find all your projects here: https://app.premai.io/projects/"""

    premai_api_key: Optional[SecretStr] = None
    """Prem AI API Key. Get it here: https://app.premai.io/api_keys/"""

    model: str
    """The Embedding model to choose from"""

    show_progress_bar: bool = False
    """Whether to show a tqdm progress bar. Must have `tqdm` installed."""

    max_retries: int = 1
    """Max number of retries for tenacity"""

    client: Any

    @pre_init
    def validate_environments(cls, values: Dict) -> Dict:
        """Validate that the package is installed and that the API token is valid"""
        try:
            from premai import Prem
        except ImportError as error:
            raise ImportError(
                "Could not import Prem Python package."
                "Please install it with: `pip install premai`"
            ) from error

        try:
            api_key = get_from_dict_or_env(values, "premai_api_key", "PREMAI_API_KEY")
            values["client"] = Prem(api_key=api_key)
        except Exception as error:
            raise ValueError("Your API Key is incorrect. Please try again.") from error
        return values

    def embed_query(self, text: str) -> List[float]:
        """Embed a single query string and return its vector."""
        response = embed_with_retry(
            self, model=self.model, project_id=self.project_id, input=text
        )
        return response.data[0].embedding

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Embed a batch of documents, returning one vector per document."""
        response = embed_with_retry(
            self, model=self.model, project_id=self.project_id, input=texts
        )
        return [item.embedding for item in response.data]
def create_prem_retry_decorator(
    embedder: PremAIEmbeddings,
    *,
    max_retries: int = 1,
) -> Callable[[Any], Any]:
    """Create a retry decorator for PremAIEmbeddings.

    Args:
        embedder (PremAIEmbeddings): The PremAIEmbeddings instance
        max_retries (int): The maximum number of retries

    Returns:
        Callable[[Any], Any]: The retry decorator
    """
    import premai.models as prem_models

    # Transient and validation failures that warrant a retry.
    retryable_errors = [
        prem_models.api_response_validation_error.APIResponseValidationError,
        prem_models.conflict_error.ConflictError,
        prem_models.model_not_found_error.ModelNotFoundError,
        prem_models.permission_denied_error.PermissionDeniedError,
        prem_models.provider_api_connection_error.ProviderAPIConnectionError,
        prem_models.provider_api_status_error.ProviderAPIStatusError,
        prem_models.provider_api_timeout_error.ProviderAPITimeoutError,
        prem_models.provider_internal_server_error.ProviderInternalServerError,
        prem_models.provider_not_found_error.ProviderNotFoundError,
        prem_models.rate_limit_error.RateLimitError,
        prem_models.unprocessable_entity_error.UnprocessableEntityError,
        prem_models.validation_error.ValidationError,
    ]
    return create_base_retry_decorator(
        error_types=retryable_errors, max_retries=max_retries, run_manager=None
    )
def embed_with_retry(
    embedder: PremAIEmbeddings,
    model: str,
    project_id: int,
    input: Union[str, List[str]],
) -> Any:
    """Call the Prem embeddings endpoint, retrying transient failures
    with tenacity according to the embedder's ``max_retries`` setting."""
    retrying = create_prem_retry_decorator(embedder, max_retries=embedder.max_retries)

    @retrying
    def _call(
        embedder: PremAIEmbeddings,
        project_id: int,
        model: str,
        input: Union[str, List[str]],
    ) -> Any:
        return embedder.client.embeddings.create(
            project_id=project_id, model=model, input=input
        )

    return _call(embedder, project_id=project_id, model=model, input=input)
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/embeddings/voyageai.py | from __future__ import annotations
import json
import logging
from typing import (
Any,
Callable,
Dict,
List,
Optional,
Tuple,
Union,
cast,
)
import requests
from langchain_core._api.deprecation import deprecated
from langchain_core.embeddings import Embeddings
from langchain_core.utils import convert_to_secret_str, get_from_dict_or_env
from pydantic import BaseModel, ConfigDict, SecretStr, model_validator
from tenacity import (
before_sleep_log,
retry,
stop_after_attempt,
wait_exponential,
)
logger = logging.getLogger(__name__)
def _create_retry_decorator(embeddings: VoyageEmbeddings) -> Callable[[Any], Any]:
    """Build a tenacity retry decorator from the instance's max_retries.

    Waits 2^x * 1 second between attempts, clamped to the [4, 10] second
    range (then 10 seconds for every attempt afterwards).
    """
    lower_bound = 4
    upper_bound = 10
    return retry(
        reraise=True,
        stop=stop_after_attempt(embeddings.max_retries),
        wait=wait_exponential(multiplier=1, min=lower_bound, max=upper_bound),
        before_sleep=before_sleep_log(logger, logging.WARNING),
    )
def _check_response(response: dict) -> dict:
if "data" not in response:
raise RuntimeError(f"Voyage API Error. Message: {json.dumps(response)}")
return response
def embed_with_retry(embeddings: VoyageEmbeddings, **kwargs: Any) -> Any:
    """POST an embedding request, retrying per the instance's retry policy."""
    decorator = _create_retry_decorator(embeddings)

    @decorator
    def _post_once(**request_kwargs: Any) -> Any:
        http_response = requests.post(**request_kwargs)
        return _check_response(http_response.json())

    return _post_once(**kwargs)
@deprecated(
    since="0.0.29",
    removal="1.0",
    alternative_import="langchain_voyageai.VoyageAIEmbeddings",
)
class VoyageEmbeddings(BaseModel, Embeddings):
    """Voyage embedding models.

    To use, you should have the environment variable ``VOYAGE_API_KEY`` set with
    your API key or pass it as a named parameter to the constructor.

    Example:
        .. code-block:: python

            from langchain_community.embeddings import VoyageEmbeddings

            voyage = VoyageEmbeddings(voyage_api_key="your-api-key", model="voyage-2")
            text = "This is a test query."
            query_result = voyage.embed_query(text)
    """

    model: str
    voyage_api_base: str = "https://api.voyageai.com/v1/embeddings"
    voyage_api_key: Optional[SecretStr] = None
    batch_size: int
    """Maximum number of texts to embed in each API request."""
    max_retries: int = 6
    """Maximum number of retries to make when generating."""
    request_timeout: Optional[Union[float, Tuple[float, float]]] = None
    """Timeout in seconds for the API request."""
    show_progress_bar: bool = False
    """Whether to show a progress bar when embedding. Must have tqdm installed if set
    to True."""
    truncation: bool = True
    """Whether to truncate the input texts to fit within the context length.
    If True, over-length input texts will be truncated to fit within the context
    length, before vectorized by the embedding model. If False, an error will be
    raised if any given text exceeds the context length."""

    model_config = ConfigDict(
        extra="forbid",
    )

    @model_validator(mode="before")
    @classmethod
    def validate_environment(cls, values: Dict) -> Any:
        """Validate that api key and python package exists in environment."""
        values["voyage_api_key"] = convert_to_secret_str(
            get_from_dict_or_env(values, "voyage_api_key", "VOYAGE_API_KEY")
        )
        if "model" not in values:
            values["model"] = "voyage-01"
            logger.warning(
                "model will become a required arg for VoyageAIEmbeddings, "
                "we recommend to specify it when using this class. "
                "Currently the default is set to voyage-01."
            )
        if "batch_size" not in values:
            # The voyage-2 family supports larger request batches.
            is_v2 = "model" in values and values["model"] in ["voyage-2", "voyage-02"]
            values["batch_size"] = 72 if is_v2 else 7
        return values

    def _invocation_params(
        self, input: List[str], input_type: Optional[str] = None
    ) -> Dict:
        """Assemble the ``requests.post`` kwargs for one embedding call."""
        secret = cast(SecretStr, self.voyage_api_key).get_secret_value()
        body: Dict = {
            "model": self.model,
            "input": input,
            "input_type": input_type,
            "truncation": self.truncation,
        }
        return {
            "url": self.voyage_api_base,
            "headers": {"Authorization": f"Bearer {secret}"},
            "json": body,
            "timeout": self.request_timeout,
        }

    def _get_embeddings(
        self,
        texts: List[str],
        batch_size: Optional[int] = None,
        input_type: Optional[str] = None,
    ) -> List[List[float]]:
        """Embed ``texts`` in batches, returning one vector per text."""
        results: List[List[float]] = []
        effective_batch = self.batch_size if batch_size is None else batch_size

        offsets = range(0, len(texts), effective_batch)
        if self.show_progress_bar:
            try:
                from tqdm.auto import tqdm
            except ImportError as e:
                raise ImportError(
                    "Must have tqdm installed if `show_progress_bar` is set to True. "
                    "Please install with `pip install tqdm`."
                ) from e

            progress_iter = tqdm(offsets)
        else:
            progress_iter = offsets

        if input_type and input_type not in ["query", "document"]:
            raise ValueError(
                f"input_type {input_type} is invalid. Options: None, 'query', "
                "'document'."
            )

        for start in progress_iter:
            batch = texts[start : start + effective_batch]
            payload = self._invocation_params(input=batch, input_type=input_type)
            response = embed_with_retry(self, **payload)
            results.extend(item["embedding"] for item in response["data"])
        return results

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Call out to Voyage Embedding endpoint for embedding search docs.

        Args:
            texts: The list of texts to embed.

        Returns:
            List of embeddings, one for each text.
        """
        return self._get_embeddings(
            texts, batch_size=self.batch_size, input_type="document"
        )

    def embed_query(self, text: str) -> List[float]:
        """Call out to Voyage Embedding endpoint for embedding query text.

        Args:
            text: The text to embed.

        Returns:
            Embedding for the text.
        """
        return self._get_embeddings(
            [text], batch_size=self.batch_size, input_type="query"
        )[0]

    def embed_general_texts(
        self, texts: List[str], *, input_type: Optional[str] = None
    ) -> List[List[float]]:
        """Call out to Voyage Embedding endpoint for embedding general text.

        Args:
            texts: The list of texts to embed.
            input_type: Type of the input text. Default to None, meaning the type is
                unspecified. Other options: query, document.

        Returns:
            Embedding for the text.
        """
        return self._get_embeddings(
            texts, batch_size=self.batch_size, input_type=input_type
        )
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/embeddings/nlpcloud.py | from typing import Any, Dict, List
from langchain_core.embeddings import Embeddings
from langchain_core.utils import get_from_dict_or_env, pre_init
from pydantic import BaseModel, ConfigDict
class NLPCloudEmbeddings(BaseModel, Embeddings):
    """NLP Cloud embedding models.

    To use, you should have the nlpcloud python package installed

    Example:
        .. code-block:: python

            from langchain_community.embeddings import NLPCloudEmbeddings

            embeddings = NLPCloudEmbeddings()
    """

    model_name: str  # name of the hosted embedding model
    gpu: bool  # whether the model runs on a GPU
    client: Any  #: :meta private:

    model_config = ConfigDict(protected_namespaces=())

    def __init__(
        self,
        model_name: str = "paraphrase-multilingual-mpnet-base-v2",
        gpu: bool = False,
        **kwargs: Any,
    ) -> None:
        super().__init__(model_name=model_name, gpu=gpu, **kwargs)

    @pre_init
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that api key and python package exists in environment."""
        api_key = get_from_dict_or_env(values, "nlpcloud_api_key", "NLPCLOUD_API_KEY")
        try:
            import nlpcloud

            values["client"] = nlpcloud.Client(
                values["model_name"], api_key, gpu=values["gpu"], lang="en"
            )
        except ImportError:
            raise ImportError(
                "Could not import nlpcloud python package. "
                "Please install it with `pip install nlpcloud`."
            )
        return values

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Embed a list of documents using NLP Cloud.

        Args:
            texts: The list of texts to embed.

        Returns:
            List of embeddings, one for each text.
        """
        response = self.client.embeddings(texts)
        return response["embeddings"]

    def embed_query(self, text: str) -> List[float]:
        """Embed a query using NLP Cloud.

        Args:
            text: The text to embed.

        Returns:
            Embeddings for the text.
        """
        response = self.client.embeddings([text])
        return response["embeddings"][0]
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/embeddings/sentence_transformer.py | """HuggingFace sentence_transformer embedding models."""
from langchain_community.embeddings.huggingface import HuggingFaceEmbeddings
# Backwards-compatible alias: SentenceTransformerEmbeddings is simply
# HuggingFaceEmbeddings under its historical name.
SentenceTransformerEmbeddings = HuggingFaceEmbeddings
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/embeddings/ipex_llm.py | # This file is adapted from
# https://github.com/langchain-ai/langchain/blob/master/libs/community/langchain_community/embeddings/huggingface.py
from typing import Any, Dict, List, Optional
from langchain_core.embeddings import Embeddings
from pydantic import BaseModel, ConfigDict, Field
DEFAULT_BGE_MODEL = "BAAI/bge-small-en-v1.5"
DEFAULT_QUERY_BGE_INSTRUCTION_EN = (
"Represent this question for searching relevant passages: "
)
DEFAULT_QUERY_BGE_INSTRUCTION_ZH = "为这个句子生成表示以用于检索相关文章:"
class IpexLLMBgeEmbeddings(BaseModel, Embeddings):
    """Wrapper around the BGE embedding model
    with IPEX-LLM optimizations on Intel CPUs and GPUs.

    To use, you should have the ``ipex-llm``
    and ``sentence_transformers`` package installed. Refer to
    `here <https://python.langchain.com/v0.1/docs/integrations/text_embedding/ipex_llm/>`_
    for installation on Intel CPU.

    Example on Intel CPU:
        .. code-block:: python

            from langchain_community.embeddings import IpexLLMBgeEmbeddings

            embedding_model = IpexLLMBgeEmbeddings(
                model_name="BAAI/bge-large-en-v1.5",
                model_kwargs={},
                encode_kwargs={"normalize_embeddings": True},
            )

    Refer to
    `here <https://python.langchain.com/v0.1/docs/integrations/text_embedding/ipex_llm_gpu/>`_
    for installation on Intel GPU.

    Example on Intel GPU:
        .. code-block:: python

            from langchain_community.embeddings import IpexLLMBgeEmbeddings

            embedding_model = IpexLLMBgeEmbeddings(
                model_name="BAAI/bge-large-en-v1.5",
                model_kwargs={"device": "xpu"},
                encode_kwargs={"normalize_embeddings": True},
            )
    """

    client: Any = None  #: :meta private:
    model_name: str = DEFAULT_BGE_MODEL
    """Model name to use."""
    cache_folder: Optional[str] = None
    """Path to store models.
    Can be also set by SENTENCE_TRANSFORMERS_HOME environment variable."""
    model_kwargs: Dict[str, Any] = Field(default_factory=dict)
    """Keyword arguments to pass to the model."""
    encode_kwargs: Dict[str, Any] = Field(default_factory=dict)
    """Keyword arguments to pass when calling the `encode` method of the model."""
    query_instruction: str = DEFAULT_QUERY_BGE_INSTRUCTION_EN
    """Instruction to use for embedding query."""
    embed_instruction: str = ""
    """Instruction to use for embedding document."""

    def __init__(self, **kwargs: Any):
        """Load the sentence_transformers model and apply IPEX-LLM optimizations."""
        super().__init__(**kwargs)
        try:
            import sentence_transformers
            from ipex_llm.transformers.convert import _optimize_post, _optimize_pre
        except ImportError as exc:
            base_url = (
                "https://python.langchain.com/v0.1/docs/integrations/text_embedding/"
            )
            raise ImportError(
                "Could not import ipex_llm or sentence_transformers. "
                f"Please refer to {base_url}/ipex_llm/ "
                "for install required packages on Intel CPU. "
                f"And refer to {base_url}/ipex_llm_gpu/ "
                "for install required packages on Intel GPU. "
            ) from exc

        # Default to CPU execution when the caller did not pick a device.
        device = self.model_kwargs.setdefault("device", "cpu")
        if device not in ("cpu", "xpu"):
            raise ValueError(
                "IpexLLMBgeEmbeddings currently only supports device to be "
                f"'cpu' or 'xpu', but you have: {device}."
            )

        model = sentence_transformers.SentenceTransformer(
            self.model_name, cache_folder=self.cache_folder, **self.model_kwargs
        )
        # Apply IPEX-LLM optimizations before and after conversion.
        model = _optimize_pre(model)
        model = _optimize_post(model)
        if device == "xpu":
            # Use half precision on Intel GPUs.
            model = model.half().to("xpu")
        self.client = model

        if "-zh" in self.model_name:
            # Chinese BGE checkpoints ship a Chinese query instruction.
            self.query_instruction = DEFAULT_QUERY_BGE_INSTRUCTION_ZH

    model_config = ConfigDict(extra="forbid", protected_namespaces=())

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Compute doc embeddings using a HuggingFace transformer model.

        Args:
            texts: The list of texts to embed.

        Returns:
            List of embeddings, one for each text.
        """
        prepared = [self.embed_instruction + t.replace("\n", " ") for t in texts]
        return self.client.encode(prepared, **self.encode_kwargs).tolist()

    def embed_query(self, text: str) -> List[float]:
        """Compute query embeddings using a HuggingFace transformer model.

        Args:
            text: The text to embed.

        Returns:
            Embeddings for the text.
        """
        cleaned = text.replace("\n", " ")
        vector = self.client.encode(
            self.query_instruction + cleaned, **self.encode_kwargs
        )
        return vector.tolist()
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/embeddings/vertexai.py | import logging
import re
import string
import threading
from concurrent.futures import ThreadPoolExecutor, wait
from typing import Any, Dict, List, Literal, Optional, Tuple
from langchain_core._api.deprecation import deprecated
from langchain_core.embeddings import Embeddings
from langchain_core.language_models.llms import create_base_retry_decorator
from langchain_core.utils import pre_init
from langchain_community.llms.vertexai import _VertexAICommon
from langchain_community.utilities.vertexai import raise_vertex_import_error
logger = logging.getLogger(__name__)
_MAX_TOKENS_PER_BATCH = 20000
_MAX_BATCH_SIZE = 250
_MIN_BATCH_SIZE = 5
@deprecated(
since="0.0.12",
removal="1.0",
alternative_import="langchain_google_vertexai.VertexAIEmbeddings",
)
class VertexAIEmbeddings(_VertexAICommon, Embeddings): # type: ignore[override]
"""Google Cloud VertexAI embedding models."""
# Instance context
instance: Dict[str, Any] = {} #: :meta private:
show_progress_bar: bool = False
"""Whether to show a tqdm progress bar. Must have `tqdm` installed."""
@pre_init
def validate_environment(cls, values: Dict) -> Dict:
"""Validates that the python package exists in environment."""
cls._try_init_vertexai(values)
if values["model_name"] == "textembedding-gecko-default":
logger.warning(
"Model_name will become a required arg for VertexAIEmbeddings "
"starting from Feb-01-2024. Currently the default is set to "
"textembedding-gecko@001"
)
values["model_name"] = "textembedding-gecko@001"
try:
from vertexai.language_models import TextEmbeddingModel
except ImportError:
raise_vertex_import_error()
values["client"] = TextEmbeddingModel.from_pretrained(values["model_name"])
return values
def __init__(
self,
# the default value would be removed after Feb-01-2024
model_name: str = "textembedding-gecko-default",
project: Optional[str] = None,
location: str = "us-central1",
request_parallelism: int = 5,
max_retries: int = 6,
credentials: Optional[Any] = None,
**kwargs: Any,
):
"""Initialize the sentence_transformer."""
super().__init__(
project=project,
location=location,
credentials=credentials,
request_parallelism=request_parallelism,
max_retries=max_retries,
model_name=model_name,
**kwargs,
)
self.instance["max_batch_size"] = kwargs.get("max_batch_size", _MAX_BATCH_SIZE)
self.instance["batch_size"] = self.instance["max_batch_size"]
self.instance["min_batch_size"] = kwargs.get("min_batch_size", _MIN_BATCH_SIZE)
self.instance["min_good_batch_size"] = self.instance["min_batch_size"]
self.instance["lock"] = threading.Lock()
self.instance["batch_size_validated"] = False
self.instance["task_executor"] = ThreadPoolExecutor(
max_workers=request_parallelism
)
self.instance[
"embeddings_task_type_supported"
] = not self.client._endpoint_name.endswith("/textembedding-gecko@001")
@staticmethod
def _split_by_punctuation(text: str) -> List[str]:
"""Splits a string by punctuation and whitespace characters."""
split_by = string.punctuation + "\t\n "
pattern = f"([{split_by}])"
# Using re.split to split the text based on the pattern
return [segment for segment in re.split(pattern, text) if segment]
@staticmethod
def _prepare_batches(texts: List[str], batch_size: int) -> List[List[str]]:
"""Splits texts in batches based on current maximum batch size
and maximum tokens per request.
"""
text_index = 0
texts_len = len(texts)
batch_token_len = 0
batches: List[List[str]] = []
current_batch: List[str] = []
if texts_len == 0:
return []
while text_index < texts_len:
current_text = texts[text_index]
# Number of tokens per a text is conservatively estimated
# as 2 times number of words, punctuation and whitespace characters.
# Using `count_tokens` API will make batching too expensive.
# Utilizing a tokenizer, would add a dependency that would not
# necessarily be reused by the application using this class.
current_text_token_cnt = (
len(VertexAIEmbeddings._split_by_punctuation(current_text)) * 2
)
end_of_batch = False
if current_text_token_cnt > _MAX_TOKENS_PER_BATCH:
# Current text is too big even for a single batch.
# Such request will fail, but we still make a batch
# so that the app can get the error from the API.
if len(current_batch) > 0:
# Adding current batch if not empty.
batches.append(current_batch)
current_batch = [current_text]
text_index += 1
end_of_batch = True
elif (
batch_token_len + current_text_token_cnt > _MAX_TOKENS_PER_BATCH
or len(current_batch) == batch_size
):
end_of_batch = True
else:
if text_index == texts_len - 1:
# Last element - even though the batch may be not big,
# we still need to make it.
end_of_batch = True
batch_token_len += current_text_token_cnt
current_batch.append(current_text)
text_index += 1
if end_of_batch:
batches.append(current_batch)
current_batch = []
batch_token_len = 0
return batches
def _get_embeddings_with_retry(
self, texts: List[str], embeddings_type: Optional[str] = None
) -> List[List[float]]:
"""Makes a Vertex AI model request with retry logic."""
from google.api_core.exceptions import (
Aborted,
DeadlineExceeded,
ResourceExhausted,
ServiceUnavailable,
)
errors = [
ResourceExhausted,
ServiceUnavailable,
Aborted,
DeadlineExceeded,
]
retry_decorator = create_base_retry_decorator(
error_types=errors, # type: ignore[arg-type]
max_retries=self.max_retries, # type: ignore[arg-type]
)
@retry_decorator
def _completion_with_retry(texts_to_process: List[str]) -> Any:
if embeddings_type and self.instance["embeddings_task_type_supported"]:
from vertexai.language_models import TextEmbeddingInput
requests = [
TextEmbeddingInput(text=t, task_type=embeddings_type)
for t in texts_to_process
]
else:
requests = texts_to_process
embeddings = self.client.get_embeddings(requests)
return [embs.values for embs in embeddings]
return _completion_with_retry(texts)
    def _prepare_and_validate_batches(
        self, texts: List[str], embeddings_type: Optional[str] = None
    ) -> Tuple[List[List[float]], List[List[str]]]:
        """Prepare text batches, validating the maximum batch size once.

        Batch size varies between GCP regions and individual project quotas,
        so the first caller probes for the largest batch size the API accepts
        (halving on ``InvalidArgument``) and caches it in the shared
        ``self.instance`` state, under ``self.instance["lock"]``.

        Args:
            texts: Texts to split into batches.
            embeddings_type: Optional task type forwarded to the API.

        Returns:
            Tuple of (embeddings of the first text batch that went through
            during probing, text batches for the rest of the texts).
        """
        from google.api_core.exceptions import InvalidArgument
        batches = VertexAIEmbeddings._prepare_batches(
            texts, self.instance["batch_size"]
        )
        # If batch size is less or equal to one that went through before,
        # then keep batches as they are.
        if len(batches[0]) <= self.instance["min_good_batch_size"]:
            return [], batches
        with self.instance["lock"]:
            # If largest possible batch size was validated
            # while waiting for the lock, then check for rebuilding
            # our batches, and return.
            if self.instance["batch_size_validated"]:
                if len(batches[0]) <= self.instance["batch_size"]:
                    return [], batches
                else:
                    return [], VertexAIEmbeddings._prepare_batches(
                        texts, self.instance["batch_size"]
                    )
            # Figure out largest possible batch size by trying to push
            # batches and lowering their size in half after every failure.
            first_batch = batches[0]
            first_result = []
            had_failure = False
            while True:
                try:
                    first_result = self._get_embeddings_with_retry(
                        first_batch, embeddings_type
                    )
                    break
                except InvalidArgument:
                    had_failure = True
                    first_batch_len = len(first_batch)
                    # Already at the floor: the failure is not caused by the
                    # batch size, so propagate the error to the caller.
                    if first_batch_len == self.instance["min_batch_size"]:
                        raise
                    first_batch_len = max(
                        self.instance["min_batch_size"], int(first_batch_len / 2)
                    )
                    first_batch = first_batch[:first_batch_len]
            first_batch_len = len(first_batch)
            self.instance["min_good_batch_size"] = max(
                self.instance["min_good_batch_size"], first_batch_len
            )
            # If had a failure and recovered
            # or went through with the max size, then it's a legit batch size.
            if had_failure or first_batch_len == self.instance["max_batch_size"]:
                self.instance["batch_size"] = first_batch_len
                self.instance["batch_size_validated"] = True
                # If batch size was updated,
                # rebuild batches with the new batch size
                # (texts that went through are excluded here).
                if first_batch_len != self.instance["max_batch_size"]:
                    batches = VertexAIEmbeddings._prepare_batches(
                        texts[first_batch_len:], self.instance["batch_size"]
                    )
            else:
                # Still figuring out max batch size.
                batches = batches[1:]
            # Returning embeddings of the first text batch that went through,
            # and text batches for the rest of texts.
            return first_result, batches
def embed(
self,
texts: List[str],
batch_size: int = 0,
embeddings_task_type: Optional[
Literal[
"RETRIEVAL_QUERY",
"RETRIEVAL_DOCUMENT",
"SEMANTIC_SIMILARITY",
"CLASSIFICATION",
"CLUSTERING",
]
] = None,
) -> List[List[float]]:
"""Embed a list of strings.
Args:
texts: List[str] The list of strings to embed.
batch_size: [int] The batch size of embeddings to send to the model.
If zero, then the largest batch size will be detected dynamically
at the first request, starting from 250, down to 5.
embeddings_task_type: [str] optional embeddings task type,
one of the following
RETRIEVAL_QUERY - Text is a query
in a search/retrieval setting.
RETRIEVAL_DOCUMENT - Text is a document
in a search/retrieval setting.
SEMANTIC_SIMILARITY - Embeddings will be used
for Semantic Textual Similarity (STS).
CLASSIFICATION - Embeddings will be used for classification.
CLUSTERING - Embeddings will be used for clustering.
Returns:
List of embeddings, one for each text.
"""
if len(texts) == 0:
return []
embeddings: List[List[float]] = []
first_batch_result: List[List[float]] = []
if batch_size > 0:
# Fixed batch size.
batches = VertexAIEmbeddings._prepare_batches(texts, batch_size)
else:
# Dynamic batch size, starting from 250 at the first call.
first_batch_result, batches = self._prepare_and_validate_batches(
texts, embeddings_task_type
)
# First batch result may have some embeddings already.
# In such case, batches have texts that were not processed yet.
embeddings.extend(first_batch_result)
tasks = []
if self.show_progress_bar:
try:
from tqdm import tqdm
iter_ = tqdm(batches, desc="VertexAIEmbeddings")
except ImportError:
logger.warning(
"Unable to show progress bar because tqdm could not be imported. "
"Please install with `pip install tqdm`."
)
iter_ = batches
else:
iter_ = batches
for batch in iter_:
tasks.append(
self.instance["task_executor"].submit(
self._get_embeddings_with_retry,
texts=batch,
embeddings_type=embeddings_task_type,
)
)
if len(tasks) > 0:
wait(tasks)
for t in tasks:
embeddings.extend(t.result())
return embeddings
    def embed_documents(
        self, texts: List[str], batch_size: int = 0
    ) -> List[List[float]]:
        """Embed a list of documents.

        Args:
            texts: List[str] The list of texts to embed.
            batch_size: [int] The batch size of embeddings to send to the model.
                If zero, then the largest batch size will be detected dynamically
                at the first request, starting from 250, down to 5.

        Returns:
            List of embeddings, one for each text.
        """
        # Documents are embedded with the retrieval-document task type.
        return self.embed(texts, batch_size, "RETRIEVAL_DOCUMENT")
    def embed_query(self, text: str) -> List[float]:
        """Embed a single query text.

        Args:
            text: The text to embed.

        Returns:
            Embedding for the text.
        """
        # Queries are embedded one at a time with the retrieval-query task type.
        embeddings = self.embed([text], 1, "RETRIEVAL_QUERY")
        return embeddings[0]
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/embeddings/edenai.py | from typing import Any, Dict, List, Optional
from langchain_core.embeddings import Embeddings
from langchain_core.utils import convert_to_secret_str, get_from_dict_or_env, pre_init
from pydantic import (
BaseModel,
ConfigDict,
Field,
SecretStr,
)
from langchain_community.utilities.requests import Requests
class EdenAiEmbeddings(BaseModel, Embeddings):
    """EdenAI embedding models.

    To use, you should have the environment variable ``EDENAI_API_KEY`` set
    with your API key, or pass it as a named parameter.
    """

    edenai_api_key: Optional[SecretStr] = Field(None, description="EdenAI API Token")
    provider: str = "openai"
    """embedding provider to use (eg: openai,google etc.)"""
    model: Optional[str] = None
    """
    model name for above provider (eg: 'gpt-3.5-turbo-instruct' for openai)
    available models are shown on https://docs.edenai.co/ under 'available providers'
    """
    model_config = ConfigDict(
        extra="forbid",
    )

    @pre_init
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that api key exists in environment."""
        api_key = get_from_dict_or_env(values, "edenai_api_key", "EDENAI_API_KEY")
        values["edenai_api_key"] = convert_to_secret_str(api_key)
        return values

    @staticmethod
    def get_user_agent() -> str:
        """Build the User-Agent header value for EdenAI requests."""
        from langchain_community import __version__

        return f"langchain/{__version__}"

    def _generate_embeddings(self, texts: List[str]) -> List[List[float]]:
        """Compute embeddings using EdenAi api."""
        url = "https://api.edenai.run/v2/text/embeddings"
        headers = {
            "accept": "application/json",
            "content-type": "application/json",
            "authorization": f"Bearer {self.edenai_api_key.get_secret_value()}",  # type: ignore[union-attr]
            "User-Agent": self.get_user_agent(),
        }
        payload: Dict[str, Any] = {"texts": texts, "providers": self.provider}
        if self.model is not None:
            # Pin the provider to a specific model.
            payload["settings"] = {self.provider: self.model}

        response = Requests(headers=headers).post(url=url, data=payload)
        if response.status_code >= 500:
            raise Exception(f"EdenAI Server: Error {response.status_code}")
        if response.status_code >= 400:
            raise ValueError(f"EdenAI received an invalid payload: {response.text}")
        if response.status_code != 200:
            raise Exception(
                f"EdenAI returned an unexpected response with status "
                f"{response.status_code}: {response.text}"
            )

        body = response.json()
        provider_response = body[self.provider]
        # A 200 response can still carry a provider-level failure.
        if provider_response.get("status") == "fail":
            raise Exception(provider_response.get("error", {}).get("message"))

        return [item["embedding"] for item in provider_response["items"]]

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Embed a list of documents using EdenAI.

        Args:
            texts: The list of texts to embed.

        Returns:
            List of embeddings, one for each text.
        """
        return self._generate_embeddings(texts)

    def embed_query(self, text: str) -> List[float]:
        """Embed a query using EdenAI.

        Args:
            text: The text to embed.

        Returns:
            Embeddings for the text.
        """
        return self._generate_embeddings([text])[0]
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/embeddings/huggingface_hub.py | import json
from typing import Any, Dict, List, Optional
from langchain_core._api import deprecated
from langchain_core.embeddings import Embeddings
from langchain_core.utils import get_from_dict_or_env
from pydantic import BaseModel, ConfigDict, model_validator
from typing_extensions import Self
DEFAULT_MODEL = "sentence-transformers/all-mpnet-base-v2"
VALID_TASKS = ("feature-extraction",)
@deprecated(
    since="0.2.2",
    removal="1.0",
    alternative_import="langchain_huggingface.HuggingFaceEndpointEmbeddings",
)
class HuggingFaceHubEmbeddings(BaseModel, Embeddings):
    """HuggingFaceHub embedding models.
    To use, you should have the ``huggingface_hub`` python package installed, and the
    environment variable ``HUGGINGFACEHUB_API_TOKEN`` set with your API token, or pass
    it as a named parameter to the constructor.
    Example:
        .. code-block:: python
            from langchain_community.embeddings import HuggingFaceHubEmbeddings
            model = "sentence-transformers/all-mpnet-base-v2"
            hf = HuggingFaceHubEmbeddings(
                model=model,
                task="feature-extraction",
                huggingfacehub_api_token="my-api-key",
            )
    """
    client: Any = None  #: :meta private:
    async_client: Any = None  #: :meta private:
    model: Optional[str] = None
    """Model name to use."""
    repo_id: Optional[str] = None
    """Huggingfacehub repository id, for backward compatibility."""
    task: Optional[str] = "feature-extraction"
    """Task to call the model with."""
    model_kwargs: Optional[dict] = None
    """Keyword arguments to pass to the model."""
    huggingfacehub_api_token: Optional[str] = None
    model_config = ConfigDict(extra="forbid", protected_namespaces=())
    @model_validator(mode="before")
    @classmethod
    def validate_environment(cls, values: Dict) -> Any:
        """Validate that api key and python package exists in environment.

        Also mirrors ``model``/``repo_id`` (``repo_id`` is kept for backward
        compatibility) and builds the sync and async inference clients.
        """
        huggingfacehub_api_token = get_from_dict_or_env(
            values, "huggingfacehub_api_token", "HUGGINGFACEHUB_API_TOKEN"
        )
        try:
            from huggingface_hub import AsyncInferenceClient, InferenceClient
            # Keep `model` and `repo_id` in sync, falling back to the default.
            if values.get("model"):
                values["repo_id"] = values["model"]
            elif values.get("repo_id"):
                values["model"] = values["repo_id"]
            else:
                values["model"] = DEFAULT_MODEL
                values["repo_id"] = DEFAULT_MODEL
            client = InferenceClient(
                model=values["model"],
                token=huggingfacehub_api_token,
            )
            async_client = AsyncInferenceClient(
                model=values["model"],
                token=huggingfacehub_api_token,
            )
            values["client"] = client
            values["async_client"] = async_client
        except ImportError:
            raise ImportError(
                "Could not import huggingface_hub python package. "
                "Please install it with `pip install huggingface_hub`."
            )
        return values
    @model_validator(mode="after")
    def post_init(self) -> Self:
        """Post init validation for the class."""
        if self.task not in VALID_TASKS:
            raise ValueError(
                f"Got invalid task {self.task}, "
                f"currently only {VALID_TASKS} are supported"
            )
        return self
    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Call out to HuggingFaceHub's embedding endpoint for embedding search docs.
        Args:
            texts: The list of texts to embed.
        Returns:
            List of embeddings, one for each text.
        """
        # replace newlines, which can negatively affect performance.
        texts = [text.replace("\n", " ") for text in texts]
        _model_kwargs = self.model_kwargs or {}
        # api doc: https://huggingface.github.io/text-embeddings-inference/#/Text%20Embeddings%20Inference/embed
        responses = self.client.post(
            json={"inputs": texts, **_model_kwargs}, task=self.task
        )
        return json.loads(responses.decode())
    async def aembed_documents(self, texts: List[str]) -> List[List[float]]:
        """Async Call to HuggingFaceHub's embedding endpoint for embedding search docs.
        Args:
            texts: The list of texts to embed.
        Returns:
            List of embeddings, one for each text.
        """
        # replace newlines, which can negatively affect performance.
        texts = [text.replace("\n", " ") for text in texts]
        _model_kwargs = self.model_kwargs or {}
        # Spread model kwargs at the top level of the payload, matching the
        # synchronous `embed_documents` (previously they were nested under a
        # "parameters" key, so the sync and async paths sent different
        # requests for the same inputs).
        responses = await self.async_client.post(
            json={"inputs": texts, **_model_kwargs}, task=self.task
        )
        return json.loads(responses.decode())
    def embed_query(self, text: str) -> List[float]:
        """Call out to HuggingFaceHub's embedding endpoint for embedding query text.
        Args:
            text: The text to embed.
        Returns:
            Embeddings for the text.
        """
        response = self.embed_documents([text])[0]
        return response
    async def aembed_query(self, text: str) -> List[float]:
        """Async Call to HuggingFaceHub's embedding endpoint for embedding query text.
        Args:
            text: The text to embed.
        Returns:
            Embeddings for the text.
        """
        response = (await self.aembed_documents([text]))[0]
        return response
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/embeddings/xinference.py | """Wrapper around Xinference embedding models."""
from typing import Any, List, Optional
from langchain_core.embeddings import Embeddings
class XinferenceEmbeddings(Embeddings):
    """Xinference embedding models.
    To use, you should have the xinference library installed:
    .. code-block:: bash
        pip install xinference
    If you're simply using the services provided by Xinference, you can utilize the xinference_client package:
    .. code-block:: bash
        pip install xinference_client
    Check out: https://github.com/xorbitsai/inference
    To run, you need to start a Xinference supervisor on one server and Xinference workers on the other servers.
    Example:
        To start a local instance of Xinference, run
        .. code-block:: bash
            $ xinference
    You can also deploy Xinference in a distributed cluster. Here are the steps:
    Starting the supervisor:
        .. code-block:: bash
            $ xinference-supervisor
    Starting the worker:
        .. code-block:: bash
            $ xinference-worker
    Then, launch a model using command line interface (CLI).
    Example:
    .. code-block:: bash
        $ xinference launch -n orca -s 3 -q q4_0
    It will return a model UID. Then you can use Xinference Embedding with LangChain.
    Example:
    .. code-block:: python
        from langchain_community.embeddings import XinferenceEmbeddings
        xinference = XinferenceEmbeddings(
            server_url="http://0.0.0.0:9997",
            model_uid = {model_uid} # replace model_uid with the model UID return from launching the model
        )
    """ # noqa: E501
    # REST client used to reach the Xinference endpoint.
    client: Any
    server_url: Optional[str]
    """URL of the xinference server"""
    model_uid: Optional[str]
    """UID of the launched model"""
    def __init__(
        self, server_url: Optional[str] = None, model_uid: Optional[str] = None
    ):
        # Prefer the full `xinference` package; fall back to the lightweight
        # client-only `xinference_client` package.
        try:
            from xinference.client import RESTfulClient
        except ImportError:
            try:
                from xinference_client import RESTfulClient
            except ImportError as e:
                raise ImportError(
                    "Could not import RESTfulClient from xinference. Please install it"
                    " with `pip install xinference` or `pip install xinference_client`."
                ) from e
        super().__init__()
        if server_url is None:
            raise ValueError("Please provide server URL")
        if model_uid is None:
            raise ValueError("Please provide the model UID")
        self.server_url = server_url
        self.model_uid = model_uid
        self.client = RESTfulClient(server_url)
    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Embed a list of documents using Xinference.
        Args:
            texts: The list of texts to embed.
        Returns:
            List of embeddings, one for each text.
        """
        model = self.client.get_model(self.model_uid)
        embeddings = [
            model.create_embedding(text)["data"][0]["embedding"] for text in texts
        ]
        # Normalize every component to a plain Python float.
        return [list(map(float, e)) for e in embeddings]
    def embed_query(self, text: str) -> List[float]:
        """Embed a query of documents using Xinference.
        Args:
            text: The text to embed.
        Returns:
            Embeddings for the text.
        """
        model = self.client.get_model(self.model_uid)
        embedding_res = model.create_embedding(text)
        embedding = embedding_res["data"][0]["embedding"]
        return list(map(float, embedding))
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/embeddings/anyscale.py | """Anyscale embeddings wrapper."""
from __future__ import annotations
from typing import Dict, Optional
from langchain_core.utils import convert_to_secret_str, get_from_dict_or_env, pre_init
from pydantic import Field, SecretStr
from langchain_community.embeddings.openai import OpenAIEmbeddings
from langchain_community.utils.openai import is_openai_v1
DEFAULT_API_BASE = "https://api.endpoints.anyscale.com/v1"
DEFAULT_MODEL = "thenlper/gte-large"
class AnyscaleEmbeddings(OpenAIEmbeddings):
    """`Anyscale` Embeddings API."""

    anyscale_api_key: Optional[SecretStr] = Field(default=None)
    """AnyScale Endpoints API keys."""
    model: str = Field(default=DEFAULT_MODEL)
    """Model name to use."""
    anyscale_api_base: str = Field(default=DEFAULT_API_BASE)
    """Base URL path for API requests."""
    tiktoken_enabled: bool = False
    """Set this to False for non-OpenAI implementations of the embeddings API"""
    embedding_ctx_length: int = 500
    """The maximum number of tokens to embed at once."""

    @property
    def lc_secrets(self) -> Dict[str, str]:
        """Map secret fields to their environment variable names."""
        return {"anyscale_api_key": "ANYSCALE_API_KEY"}

    @pre_init
    def validate_environment(cls, values: dict) -> dict:
        """Validate that api key and python package exists in environment."""
        values["anyscale_api_key"] = convert_to_secret_str(
            get_from_dict_or_env(values, "anyscale_api_key", "ANYSCALE_API_KEY")
        )
        values["anyscale_api_base"] = get_from_dict_or_env(
            values,
            "anyscale_api_base",
            "ANYSCALE_API_BASE",
            default=DEFAULT_API_BASE,
        )
        try:
            import openai
        except ImportError:
            raise ImportError(
                "Could not import openai python package. "
                "Please install it with `pip install openai`."
            )
        if is_openai_v1():
            # For backwards compatibility.
            values["client"] = openai.OpenAI(
                api_key=values["anyscale_api_key"].get_secret_value(),
                base_url=values["anyscale_api_base"],
            ).embeddings
        else:
            values["openai_api_base"] = values["anyscale_api_base"]
            values["openai_api_key"] = values["anyscale_api_key"].get_secret_value()
            values["client"] = openai.Embedding  # type: ignore[attr-defined]
        return values

    @property
    def _llm_type(self) -> str:
        """Identifier used by LangChain serialization/telemetry."""
        return "anyscale-embedding"
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/embeddings/itrex.py | import importlib.util
import os
from typing import Any, Dict, List, Optional
from langchain_core.embeddings import Embeddings
from pydantic import BaseModel, ConfigDict
class QuantizedBgeEmbeddings(BaseModel, Embeddings):
    """Leverage Itrex runtime to unlock the performance of compressed NLP models.
    Please ensure that you have installed intel-extension-for-transformers.
    Input:
        model_name: str = Model name.
        max_seq_len: int = The maximum sequence length for tokenization. (default 512)
        pooling_strategy: str =
            "mean" or "cls", pooling strategy for the final layer. (default "mean")
        query_instruction: Optional[str] =
            An instruction to add to the query before embedding. (default None)
        document_instruction: Optional[str] =
            An instruction to add to each document before embedding. (default None)
        padding: Optional[bool] =
            Whether to add padding during tokenization or not. (default True)
        model_kwargs: Optional[Dict] =
            Parameters to add to the model during initialization. (default {})
        encode_kwargs: Optional[Dict] =
            Parameters to add during the embedding forward pass. (default {})
        onnx_file_name: Optional[str] =
            File name of onnx optimized model which is exported by itrex.
            (default "int8-model.onnx")
    Example:
        .. code-block:: python
            from langchain_community.embeddings import QuantizedBgeEmbeddings
            model_name = "Intel/bge-small-en-v1.5-sts-int8-static-inc"
            encode_kwargs = {'normalize_embeddings': True}
            hf = QuantizedBgeEmbeddings(
                model_name,
                encode_kwargs=encode_kwargs,
                query_instruction="Represent this sentence for searching relevant passages: "
            )
    """ # noqa: E501
    def __init__(
        self,
        model_name: str,
        *,
        max_seq_len: int = 512,
        pooling_strategy: str = "mean",  # "mean" or "cls"
        query_instruction: Optional[str] = None,
        document_instruction: Optional[str] = None,
        padding: bool = True,
        model_kwargs: Optional[Dict] = None,
        encode_kwargs: Optional[Dict] = None,
        onnx_file_name: Optional[str] = "int8-model.onnx",
        **kwargs: Any,
    ) -> None:
        super().__init__(**kwargs)
        # Fail fast with actionable messages for the three hard dependencies.
        if importlib.util.find_spec("intel_extension_for_transformers") is None:
            raise ImportError(
                "Could not import intel_extension_for_transformers python package. "
                "Please install it with "
                "`pip install -U intel-extension-for-transformers`."
            )
        if importlib.util.find_spec("torch") is None:
            raise ImportError(
                "Could not import torch python package. "
                "Please install it with `pip install -U torch`."
            )
        if importlib.util.find_spec("onnx") is None:
            raise ImportError(
                "Could not import onnx python package. "
                "Please install it with `pip install -U onnx`."
            )
        self.model_name_or_path = model_name
        self.max_seq_len = max_seq_len
        self.pooling = pooling_strategy
        self.padding = padding
        self.encode_kwargs = encode_kwargs or {}
        self.model_kwargs = model_kwargs or {}
        self.normalize = self.encode_kwargs.get("normalize_embeddings", False)
        self.batch_size = self.encode_kwargs.get("batch_size", 32)
        self.query_instruction = query_instruction
        self.document_instruction = document_instruction
        self.onnx_file_name = onnx_file_name
        self.load_model()
    def load_model(self) -> None:
        """Load tokenizer, config, and the itrex-optimized ONNX model."""
        from huggingface_hub import hf_hub_download
        from intel_extension_for_transformers.transformers import AutoModel
        from transformers import AutoConfig, AutoTokenizer
        self.hidden_size = AutoConfig.from_pretrained(
            self.model_name_or_path
        ).hidden_size
        self.transformer_tokenizer = AutoTokenizer.from_pretrained(
            self.model_name_or_path,
        )
        # Use a local ONNX file when present; otherwise download from the hub.
        onnx_model_path = os.path.join(self.model_name_or_path, self.onnx_file_name)  # type: ignore[arg-type]
        if not os.path.exists(onnx_model_path):
            onnx_model_path = hf_hub_download(
                self.model_name_or_path, filename=self.onnx_file_name
            )
        self.transformer_model = AutoModel.from_pretrained(
            onnx_model_path, use_embedding_runtime=True
        )
    model_config = ConfigDict(
        extra="allow",
        protected_namespaces=(),
    )
    def _embed(self, inputs: Any) -> Any:
        """Run the model forward pass and pool the last hidden state."""
        import torch
        engine_input = [value for value in inputs.values()]
        outputs = self.transformer_model.generate(engine_input)
        if "last_hidden_state:0" in outputs:
            last_hidden_state = outputs["last_hidden_state:0"]
        else:
            # Fall back to the first output when the runtime uses another name.
            last_hidden_state = [out for out in outputs.values()][0]
        last_hidden_state = torch.tensor(last_hidden_state).reshape(
            inputs["input_ids"].shape[0], inputs["input_ids"].shape[1], self.hidden_size
        )
        if self.pooling == "mean":
            emb = self._mean_pooling(last_hidden_state, inputs["attention_mask"])
        elif self.pooling == "cls":
            emb = self._cls_pooling(last_hidden_state)
        else:
            # Fixed error-message typo ("no supported" -> "not supported").
            raise ValueError("pooling method not supported")
        if self.normalize:
            emb = torch.nn.functional.normalize(emb, p=2, dim=1)
        return emb
    @staticmethod
    def _cls_pooling(last_hidden_state: Any) -> Any:
        """Take the [CLS] (first token) vector of each sequence."""
        return last_hidden_state[:, 0]
    @staticmethod
    def _mean_pooling(last_hidden_state: Any, attention_mask: Any) -> Any:
        """Average token vectors, weighting by the attention mask."""
        try:
            import torch
        except ImportError as e:
            raise ImportError(
                "Unable to import torch, please install with `pip install -U torch`."
            ) from e
        input_mask_expanded = (
            attention_mask.unsqueeze(-1).expand(last_hidden_state.size()).float()
        )
        sum_embeddings = torch.sum(last_hidden_state * input_mask_expanded, 1)
        # Clamp to avoid division by zero for fully-masked rows.
        sum_mask = torch.clamp(input_mask_expanded.sum(1), min=1e-9)
        return sum_embeddings / sum_mask
    def _embed_text(self, texts: List[str]) -> List[List[float]]:
        """Tokenize one batch of texts and return their embeddings."""
        inputs = self.transformer_tokenizer(
            texts,
            max_length=self.max_seq_len,
            truncation=True,
            padding=self.padding,
            return_tensors="pt",
        )
        return self._embed(inputs).tolist()
    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Embed a list of text documents using the Optimized Embedder model.
        Input:
            texts: List[str] = List of text documents to embed.
        Output:
            List[List[float]] = The embeddings of each text document.
        """
        docs = [
            self.document_instruction + d if self.document_instruction else d
            for d in texts
        ]
        # Batch by simple slicing. This yields the same consecutive batches
        # the previous pandas groupby produced, without requiring pandas.
        vectors: List[List[float]] = []
        for start in range(0, len(docs), self.batch_size):
            vectors += self._embed_text(docs[start : start + self.batch_size])
        return vectors
    def embed_query(self, text: str) -> List[float]:
        """Embed a single query, applying `query_instruction` when configured."""
        if self.query_instruction:
            text = self.query_instruction + text
        return self._embed_text([text])[0]
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/embeddings/minimax.py | from __future__ import annotations
import logging
from typing import Any, Callable, Dict, List, Optional
import requests
from langchain_core.embeddings import Embeddings
from langchain_core.utils import convert_to_secret_str, get_from_dict_or_env, pre_init
from pydantic import BaseModel, ConfigDict, Field, SecretStr
from tenacity import (
before_sleep_log,
retry,
stop_after_attempt,
wait_exponential,
)
logger = logging.getLogger(__name__)
def _create_retry_decorator() -> Callable[[Any], Any]:
    """Returns a tenacity retry decorator.

    Retries up to 6 times with exponential back-off between 1 and 4 seconds,
    logging a warning before each sleep and re-raising the last error once
    the attempts are exhausted.
    """
    return retry(
        reraise=True,
        stop=stop_after_attempt(6),
        wait=wait_exponential(multiplier=1, min=1, max=4),
        before_sleep=before_sleep_log(logger, logging.WARNING),
    )
def embed_with_retry(embeddings: MiniMaxEmbeddings, *args: Any, **kwargs: Any) -> Any:
    """Use tenacity to retry the completion call."""

    @_create_retry_decorator()
    def _do_embed(*call_args: Any, **call_kwargs: Any) -> Any:
        return embeddings.embed(*call_args, **call_kwargs)

    return _do_embed(*args, **kwargs)
class MiniMaxEmbeddings(BaseModel, Embeddings):
    """MiniMax embedding model integration.
    Setup:
        To use, you should have the environment variable ``MINIMAX_GROUP_ID`` and
        ``MINIMAX_API_KEY`` set with your API token.
        .. code-block:: bash
            export MINIMAX_API_KEY="your-api-key"
            export MINIMAX_GROUP_ID="your-group-id"
    Key init args — completion params:
        model: Optional[str]
            Name of MiniMax embeddings model to use.
        api_key: Optional[str]
            Automatically inferred from env var `MINIMAX_API_KEY` if not provided.
        group_id: Optional[str]
            Automatically inferred from env var `MINIMAX_GROUP_ID` if not provided.
    See full list of supported init args and their descriptions in the params section.
    Instantiate:
        .. code-block:: python
            from langchain_community.embeddings import MiniMaxEmbeddings
            embed = MiniMaxEmbeddings(
                model="embo-01",
                # api_key="...",
                # group_id="...",
                # other
            )
    Embed single text:
        .. code-block:: python
            input_text = "The meaning of life is 42"
            embed.embed_query(input_text)
        .. code-block:: python
            [0.03016241, 0.03617699, 0.0017198119, -0.002061239, -0.00029994643, -0.0061320597, -0.0043635326, ...]
    Embed multiple text:
        .. code-block:: python
            input_texts = ["This is a test query1.", "This is a test query2."]
            embed.embed_documents(input_texts)
        .. code-block:: python
            [
                [-0.0021588828, -0.007608119, 0.029349545, -0.0038194496, 0.008031177, -0.004529633, -0.020150753, ...],
                [ -0.00023150232, -0.011122423, 0.016930554, 0.0083089275, 0.012633711, 0.019683322, -0.005971041, ...]
            ]
    """ # noqa: E501
    endpoint_url: str = "https://api.minimax.chat/v1/embeddings"
    """Endpoint URL to use."""
    model: str = "embo-01"
    """Embeddings model name to use."""
    embed_type_db: str = "db"
    """For embed_documents"""
    embed_type_query: str = "query"
    """For embed_query"""
    minimax_group_id: Optional[str] = Field(default=None, alias="group_id")
    """Group ID for MiniMax API."""
    minimax_api_key: Optional[SecretStr] = Field(default=None, alias="api_key")
    """API Key for MiniMax API."""
    model_config = ConfigDict(
        populate_by_name=True,
        extra="forbid",
    )
    @pre_init
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that group id and api key exists in environment."""
        minimax_group_id = get_from_dict_or_env(
            values, ["minimax_group_id", "group_id"], "MINIMAX_GROUP_ID"
        )
        minimax_api_key = convert_to_secret_str(
            get_from_dict_or_env(
                values, ["minimax_api_key", "api_key"], "MINIMAX_API_KEY"
            )
        )
        values["minimax_group_id"] = minimax_group_id
        values["minimax_api_key"] = minimax_api_key
        return values
    def embed(
        self,
        texts: List[str],
        embed_type: str,
    ) -> List[List[float]]:
        """Send one embeddings request to the MiniMax API.

        Args:
            texts: Texts to embed in a single request.
            embed_type: Either ``self.embed_type_db`` (documents) or
                ``self.embed_type_query`` (queries).

        Returns:
            List of embeddings, one for each text.

        Raises:
            ValueError: If the API responds with a non-zero status code.
        """
        payload = {
            "model": self.model,
            "type": embed_type,
            "texts": texts,
        }
        # HTTP headers for authorization
        headers = {
            "Authorization": f"Bearer {self.minimax_api_key.get_secret_value()}",  # type: ignore[union-attr]
            "Content-Type": "application/json",
        }
        params = {
            "GroupId": self.minimax_group_id,
        }
        # send request
        response = requests.post(
            self.endpoint_url, params=params, headers=headers, json=payload
        )
        parsed_response = response.json()
        # check for errors
        if parsed_response["base_resp"]["status_code"] != 0:
            raise ValueError(
                f"MiniMax API returned an error: {parsed_response['base_resp']}"
            )
        embeddings = parsed_response["vectors"]
        return embeddings
    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Embed documents using a MiniMax embedding endpoint.
        Args:
            texts: The list of texts to embed.
        Returns:
            List of embeddings, one for each text.
        """
        embeddings = embed_with_retry(self, texts=texts, embed_type=self.embed_type_db)
        return embeddings
    def embed_query(self, text: str) -> List[float]:
        """Embed a query using a MiniMax embedding endpoint.
        Args:
            text: The text to embed.
        Returns:
            Embeddings for the text.
        """
        embeddings = embed_with_retry(
            self, texts=[text], embed_type=self.embed_type_query
        )
        return embeddings[0]
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/embeddings/cloudflare_workersai.py | from typing import Any, Dict, List
import requests
from langchain_core.embeddings import Embeddings
from pydantic import BaseModel, ConfigDict
DEFAULT_MODEL_NAME = "@cf/baai/bge-base-en-v1.5"
class CloudflareWorkersAIEmbeddings(BaseModel, Embeddings):
    """Cloudflare Workers AI embedding model.
    To use, you need to provide an API token and
    account ID to access Cloudflare Workers AI.
    Example:
        .. code-block:: python
            from langchain_community.embeddings import CloudflareWorkersAIEmbeddings
            account_id = "my_account_id"
            api_token = "my_secret_api_token"
            model_name =  "@cf/baai/bge-small-en-v1.5"
            cf = CloudflareWorkersAIEmbeddings(
                account_id=account_id,
                api_token=api_token,
                model_name=model_name
            )
    """
    api_base_url: str = "https://api.cloudflare.com/client/v4/accounts"
    account_id: str
    api_token: str
    model_name: str = DEFAULT_MODEL_NAME
    batch_size: int = 50
    strip_new_lines: bool = True
    headers: Dict[str, str] = {"Authorization": "Bearer "}
    def __init__(self, **kwargs: Any):
        """Initialize the Cloudflare Workers AI client."""
        super().__init__(**kwargs)
        # Build the auth header once from the validated token.
        self.headers = {"Authorization": f"Bearer {self.api_token}"}
    model_config = ConfigDict(extra="forbid", protected_namespaces=())
    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Compute doc embeddings using Cloudflare Workers AI.
        Args:
            texts: The list of texts to embed.
        Returns:
            List of embeddings, one for each text.
        Raises:
            requests.HTTPError: If the Workers AI endpoint returns an error
                status code.
        """
        if self.strip_new_lines:
            texts = [text.replace("\n", " ") for text in texts]
        batches = [
            texts[i : i + self.batch_size]
            for i in range(0, len(texts), self.batch_size)
        ]
        embeddings = []
        for batch in batches:
            response = requests.post(
                f"{self.api_base_url}/{self.account_id}/ai/run/{self.model_name}",
                headers=self.headers,
                json={"text": batch},
            )
            # Surface HTTP errors explicitly instead of failing later with an
            # opaque KeyError when the expected "result" payload is missing.
            response.raise_for_status()
            embeddings.extend(response.json()["result"]["data"])
        return embeddings
    def embed_query(self, text: str) -> List[float]:
        """Compute query embeddings using Cloudflare Workers AI.
        Args:
            text: The text to embed.
        Returns:
            Embeddings for the text.
        Raises:
            requests.HTTPError: If the Workers AI endpoint returns an error
                status code.
        """
        text = text.replace("\n", " ") if self.strip_new_lines else text
        response = requests.post(
            f"{self.api_base_url}/{self.account_id}/ai/run/{self.model_name}",
            headers=self.headers,
            json={"text": [text]},
        )
        # Surface HTTP errors explicitly instead of a KeyError on "result".
        response.raise_for_status()
        return response.json()["result"]["data"][0]
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/embeddings/sambanova.py | import json
from typing import Dict, Generator, List, Optional
import requests
from langchain_core.embeddings import Embeddings
from langchain_core.utils import get_from_dict_or_env, pre_init
from pydantic import BaseModel, ConfigDict
class SambaStudioEmbeddings(BaseModel, Embeddings):
    """SambaNova embedding models.

    To use, you should have the environment variables
    ``SAMBASTUDIO_EMBEDDINGS_BASE_URL``, ``SAMBASTUDIO_EMBEDDINGS_BASE_URI``
    ``SAMBASTUDIO_EMBEDDINGS_PROJECT_ID``, ``SAMBASTUDIO_EMBEDDINGS_ENDPOINT_ID``,
    ``SAMBASTUDIO_EMBEDDINGS_API_KEY``
    set with your personal sambastudio variable or pass it as a named parameter
    to the constructor.

    Example:
        .. code-block:: python

            from langchain_community.embeddings import SambaStudioEmbeddings

            embeddings = SambaStudioEmbeddings(sambastudio_embeddings_base_url=base_url,
                                          sambastudio_embeddings_base_uri=base_uri,
                                          sambastudio_embeddings_project_id=project_id,
                                          sambastudio_embeddings_endpoint_id=endpoint_id,
                                          sambastudio_embeddings_api_key=api_key,
                                          batch_size=32)
            (or)

            embeddings = SambaStudioEmbeddings(batch_size=32)

            (or)

            # CoE example
            embeddings = SambaStudioEmbeddings(
                batch_size=1,
                model_kwargs={
                    'select_expert':'e5-mistral-7b-instruct'
                }
            )
    """

    sambastudio_embeddings_base_url: str = ""
    """Base url to use"""

    sambastudio_embeddings_base_uri: str = ""
    """endpoint base uri"""

    sambastudio_embeddings_project_id: str = ""
    """Project id on sambastudio for model"""

    sambastudio_embeddings_endpoint_id: str = ""
    """endpoint id on sambastudio for model"""

    sambastudio_embeddings_api_key: str = ""
    """sambastudio api key"""

    model_kwargs: dict = {}
    """Key word arguments to pass to the model."""

    batch_size: int = 32
    """Batch size for the embedding models"""

    model_config = ConfigDict(protected_namespaces=())

    @pre_init
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that api key and python package exists in environment."""
        values["sambastudio_embeddings_base_url"] = get_from_dict_or_env(
            values, "sambastudio_embeddings_base_url", "SAMBASTUDIO_EMBEDDINGS_BASE_URL"
        )
        values["sambastudio_embeddings_base_uri"] = get_from_dict_or_env(
            values,
            "sambastudio_embeddings_base_uri",
            "SAMBASTUDIO_EMBEDDINGS_BASE_URI",
            default="api/predict/generic",
        )
        values["sambastudio_embeddings_project_id"] = get_from_dict_or_env(
            values,
            "sambastudio_embeddings_project_id",
            "SAMBASTUDIO_EMBEDDINGS_PROJECT_ID",
        )
        values["sambastudio_embeddings_endpoint_id"] = get_from_dict_or_env(
            values,
            "sambastudio_embeddings_endpoint_id",
            "SAMBASTUDIO_EMBEDDINGS_ENDPOINT_ID",
        )
        values["sambastudio_embeddings_api_key"] = get_from_dict_or_env(
            values, "sambastudio_embeddings_api_key", "SAMBASTUDIO_EMBEDDINGS_API_KEY"
        )
        return values

    def _get_tuning_params(self) -> str:
        """
        Get the tuning parameters to use when calling the model

        Returns:
            The tuning parameters as a JSON string.
        """
        if "api/v2/predict/generic" in self.sambastudio_embeddings_base_uri:
            # v2 endpoints accept plain key/value tuning params.
            tuning_params_dict = self.model_kwargs
        else:
            # v1 endpoints expect each param wrapped with its type name.
            tuning_params_dict = {
                k: {"type": type(v).__name__, "value": str(v)}
                for k, v in (self.model_kwargs.items())
            }
        tuning_params = json.dumps(tuning_params_dict)
        return tuning_params

    def _get_full_url(self, path: str) -> str:
        """
        Return the full API URL for a given path.

        :param str path: the sub-path
        :returns: the full API URL for the sub-path
        :rtype: str
        """
        return f"{self.sambastudio_embeddings_base_url}/{self.sambastudio_embeddings_base_uri}/{path}"  # noqa: E501

    def _iterate_over_batches(self, texts: List[str], batch_size: int) -> Generator:
        """Generator for creating batches in the embed documents method

        Args:
            texts (List[str]): list of strings to embed
            batch_size (int, optional): batch size to be used for the embedding model.
            Will depend on the RDU endpoint used.

        Yields:
            List[str]: list (batch) of strings of size batch size
        """
        for i in range(0, len(texts), batch_size):
            yield texts[i : i + batch_size]

    def _post(self, http_session: requests.Session, url: str, data: dict) -> Any:
        """POST ``data`` to ``url`` with the API key header.

        Args:
            http_session: session used to issue the request.
            url: full endpoint URL.
            data: JSON-serializable request body.

        Returns:
            The successful ``requests.Response``.

        Raises:
            RuntimeError: if the endpoint responds with a non-200 status.
        """
        response = http_session.post(
            url,
            headers={"key": self.sambastudio_embeddings_api_key},
            json=data,
        )
        if response.status_code != 200:
            raise RuntimeError(
                f"Sambanova /complete call failed with status code "
                f"{response.status_code}.\n Details: {response.text}"
            )
        return response

    def embed_documents(
        self, texts: List[str], batch_size: Optional[int] = None
    ) -> List[List[float]]:
        """Returns a list of embeddings for the given sentences.

        Args:
            texts (`List[str]`): List of texts to encode
            batch_size (`int`): Batch size for the encoding

        Returns:
            `List[np.ndarray]` or `List[tensor]`: List of embeddings
            for the given sentences

        Raises:
            RuntimeError: on a non-200 endpoint response.
            KeyError: if the expected payload key is missing from the response.
            ValueError: if the configured base uri is not a supported variant.
        """
        if batch_size is None:
            batch_size = self.batch_size
        http_session = requests.Session()
        url = self._get_full_url(
            f"{self.sambastudio_embeddings_project_id}/{self.sambastudio_embeddings_endpoint_id}"
        )
        params = json.loads(self._get_tuning_params())
        embeddings = []

        # Each supported base-uri variant has its own request/response schema.
        if "api/predict/nlp" in self.sambastudio_embeddings_base_uri:
            for batch in self._iterate_over_batches(texts, batch_size):
                response = self._post(
                    http_session, url, {"inputs": batch, "params": params}
                )
                try:
                    embeddings.extend(response.json()["data"])
                except KeyError:
                    raise KeyError(
                        "'data' not found in endpoint response",
                        response.json(),
                    )
        elif "api/v2/predict/generic" in self.sambastudio_embeddings_base_uri:
            for batch in self._iterate_over_batches(texts, batch_size):
                items = [
                    {"id": f"item{i}", "value": item} for i, item in enumerate(batch)
                ]
                response = self._post(
                    http_session, url, {"items": items, "params": params}
                )
                try:
                    embeddings.extend(
                        [item["value"] for item in response.json()["items"]]
                    )
                except KeyError:
                    raise KeyError(
                        "'items' not found in endpoint response",
                        response.json(),
                    )
        elif "api/predict/generic" in self.sambastudio_embeddings_base_uri:
            for batch in self._iterate_over_batches(texts, batch_size):
                response = self._post(
                    http_session, url, {"instances": batch, "params": params}
                )
                try:
                    # The original code branched on params.get("select_expert")
                    # but both branches were identical; collapsed here.
                    embeddings.extend(response.json()["predictions"])
                except KeyError:
                    raise KeyError(
                        "'predictions' not found in endpoint response",
                        response.json(),
                    )
        else:
            raise ValueError(
                f"handling of endpoint uri: {self.sambastudio_embeddings_base_uri} not implemented"  # noqa: E501
            )
        return embeddings

    def embed_query(self, text: str) -> List[float]:
        """Returns the embedding for a single query text.

        Args:
            text (`str`): The text to encode

        Returns:
            `List[float]`: Embedding for the given text

        Raises:
            RuntimeError: on a non-200 endpoint response.
            KeyError: if the expected payload key is missing from the response.
            ValueError: if the configured base uri is not a supported variant.
        """
        http_session = requests.Session()
        url = self._get_full_url(
            f"{self.sambastudio_embeddings_project_id}/{self.sambastudio_embeddings_endpoint_id}"
        )
        params = json.loads(self._get_tuning_params())

        if "api/predict/nlp" in self.sambastudio_embeddings_base_uri:
            response = self._post(
                http_session, url, {"inputs": [text], "params": params}
            )
            try:
                embedding = response.json()["data"][0]
            except KeyError:
                raise KeyError(
                    "'data' not found in endpoint response",
                    response.json(),
                )
        elif "api/v2/predict/generic" in self.sambastudio_embeddings_base_uri:
            response = self._post(
                http_session,
                url,
                {"items": [{"id": "item0", "value": text}], "params": params},
            )
            try:
                embedding = response.json()["items"][0]["value"]
            except KeyError:
                raise KeyError(
                    "'items' not found in endpoint response",
                    response.json(),
                )
        elif "api/predict/generic" in self.sambastudio_embeddings_base_uri:
            response = self._post(
                http_session, url, {"instances": [text], "params": params}
            )
            try:
                # See embed_documents: the select_expert branch was a no-op.
                embedding = response.json()["predictions"][0]
            except KeyError:
                raise KeyError(
                    "'predictions' not found in endpoint response",
                    response.json(),
                )
        else:
            raise ValueError(
                f"handling of endpoint uri: {self.sambastudio_embeddings_base_uri} not implemented"  # noqa: E501
            )
        return embedding
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/embeddings/azure_openai.py | """Azure OpenAI embeddings wrapper."""
from __future__ import annotations
import os
import warnings
from typing import Any, Awaitable, Callable, Dict, Optional, Union
from langchain_core._api.deprecation import deprecated
from langchain_core.utils import get_from_dict_or_env
from pydantic import Field, model_validator
from typing_extensions import Self
from langchain_community.embeddings.openai import OpenAIEmbeddings
from langchain_community.utils.openai import is_openai_v1
@deprecated(
    since="0.0.9",
    removal="1.0",
    alternative_import="langchain_openai.AzureOpenAIEmbeddings",
)
class AzureOpenAIEmbeddings(OpenAIEmbeddings):  # type: ignore[override]
    """`Azure OpenAI` Embeddings API.

    Deprecated shim over :class:`OpenAIEmbeddings` that configures the
    Azure-specific client (endpoint, deployment, AD tokens, api version).
    """

    azure_endpoint: Union[str, None] = None
    """Your Azure endpoint, including the resource.

    Automatically inferred from env var `AZURE_OPENAI_ENDPOINT` if not provided.

    Example: `https://example-resource.azure.openai.com/`
    """
    deployment: Optional[str] = Field(default=None, alias="azure_deployment")
    """A model deployment.

    If given sets the base client URL to include `/deployments/{azure_deployment}`.
    Note: this means you won't be able to use non-deployment endpoints.
    """
    openai_api_key: Union[str, None] = Field(default=None, alias="api_key")
    """Automatically inferred from env var `AZURE_OPENAI_API_KEY` if not provided."""
    azure_ad_token: Union[str, None] = None
    """Your Azure Active Directory token.

    Automatically inferred from env var `AZURE_OPENAI_AD_TOKEN` if not provided.

    For more:
    https://www.microsoft.com/en-us/security/business/identity-access/microsoft-entra-id.
    """
    azure_ad_token_provider: Union[Callable[[], str], None] = None
    """A function that returns an Azure Active Directory token.

    Will be invoked on every sync request. For async requests,
    will be invoked if `azure_ad_async_token_provider` is not provided.
    """
    azure_ad_async_token_provider: Union[Callable[[], Awaitable[str]], None] = None
    """A function that returns an Azure Active Directory token.

    Will be invoked on every async request.
    """
    openai_api_version: Optional[str] = Field(default=None, alias="api_version")
    """Automatically inferred from env var `OPENAI_API_VERSION` if not provided."""
    validate_base_url: bool = True

    @model_validator(mode="before")
    @classmethod
    def validate_environment(cls, values: Dict) -> Any:
        """Validate that api key and python package exists in environment.

        Runs before field parsing: resolves every credential/endpoint field
        from the input dict or the corresponding environment variable, then
        normalizes `openai_api_base` for openai>=1.0 clients.
        """
        # Check OPENAI_KEY for backwards compatibility.
        # TODO: Remove OPENAI_API_KEY support to avoid possible conflict when using
        # other forms of azure credentials.
        values["openai_api_key"] = (
            values.get("openai_api_key")
            or os.getenv("AZURE_OPENAI_API_KEY")
            or os.getenv("OPENAI_API_KEY")
        )
        values["openai_api_base"] = values.get("openai_api_base") or os.getenv(
            "OPENAI_API_BASE"
        )
        values["openai_api_version"] = values.get("openai_api_version") or os.getenv(
            "OPENAI_API_VERSION", default="2023-05-15"
        )
        # api_type defaults to "azure" here (plain OpenAI would use "open_ai").
        values["openai_api_type"] = get_from_dict_or_env(
            values, "openai_api_type", "OPENAI_API_TYPE", default="azure"
        )
        values["openai_organization"] = (
            values.get("openai_organization")
            or os.getenv("OPENAI_ORG_ID")
            or os.getenv("OPENAI_ORGANIZATION")
        )
        values["openai_proxy"] = get_from_dict_or_env(
            values,
            "openai_proxy",
            "OPENAI_PROXY",
            default="",
        )
        values["azure_endpoint"] = values.get("azure_endpoint") or os.getenv(
            "AZURE_OPENAI_ENDPOINT"
        )
        values["azure_ad_token"] = values.get("azure_ad_token") or os.getenv(
            "AZURE_OPENAI_AD_TOKEN"
        )
        # Azure OpenAI embedding models allow a maximum of 2048 texts
        # at a time in each batch
        # See: https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#embeddings
        # NOTE(review): this indexes "chunk_size" directly on the raw input
        # dict — presumably it is always populated upstream; verify a caller
        # omitting chunk_size cannot reach here with a KeyError.
        values["chunk_size"] = min(values["chunk_size"], 2048)
        try:
            import openai  # noqa: F401
        except ImportError:
            raise ImportError(
                "Could not import openai python package. "
                "Please install it with `pip install openai`."
            )
        if is_openai_v1():
            # For backwards compatibility. Before openai v1, no distinction was made
            # between azure_endpoint and base_url (openai_api_base).
            openai_api_base = values["openai_api_base"]
            if openai_api_base and values["validate_base_url"]:
                # Ensure the legacy base url has the "/openai" path segment the
                # v1 Azure client expects.
                if "/openai" not in openai_api_base:
                    values["openai_api_base"] += "/openai"
                    warnings.warn(
                        "As of openai>=1.0.0, Azure endpoints should be specified via "
                        f"the `azure_endpoint` param not `openai_api_base` "
                        f"(or alias `base_url`). Updating `openai_api_base` from "
                        f"{openai_api_base} to {values['openai_api_base']}."
                    )
                if values["deployment"]:
                    warnings.warn(
                        "As of openai>=1.0.0, if `deployment` (or alias "
                        "`azure_deployment`) is specified then "
                        "`openai_api_base` (or alias `base_url`) should not be. "
                        "Instead use `deployment` (or alias `azure_deployment`) "
                        "and `azure_endpoint`."
                    )
                    # Fold the deployment into the base url, then clear it so the
                    # v1 client does not receive both.
                    if values["deployment"] not in values["openai_api_base"]:
                        warnings.warn(
                            "As of openai>=1.0.0, if `openai_api_base` "
                            "(or alias `base_url`) is specified it is expected to be "
                            "of the form "
                            "https://example-resource.azure.openai.com/openai/deployments/example-deployment. "  # noqa: E501
                            f"Updating {openai_api_base} to "
                            f"{values['openai_api_base']}."
                        )
                        values["openai_api_base"] += (
                            "/deployments/" + values["deployment"]
                        )
                    values["deployment"] = None
        return values

    @model_validator(mode="after")
    def post_init_validator(self) -> Self:
        """Validate that the base url is set.

        Runs after field parsing: builds the sync (and, for openai>=1.0,
        async) embeddings clients from the resolved configuration.
        """
        import openai

        if is_openai_v1():
            client_params = {
                "api_version": self.openai_api_version,
                "azure_endpoint": self.azure_endpoint,
                "azure_deployment": self.deployment,
                "api_key": self.openai_api_key,
                "azure_ad_token": self.azure_ad_token,
                "azure_ad_token_provider": self.azure_ad_token_provider,
                "organization": self.openai_organization,
                "base_url": self.openai_api_base,
                "timeout": self.request_timeout,
                "max_retries": self.max_retries,
                "default_headers": self.default_headers,
                "default_query": self.default_query,
                "http_client": self.http_client,
            }
            self.client = openai.AzureOpenAI(**client_params).embeddings  # type: ignore[arg-type, arg-type, arg-type, arg-type, arg-type, arg-type, arg-type, arg-type, arg-type]
            # The async client reuses the same params but prefers the
            # dedicated async AD token provider when one is supplied.
            if self.azure_ad_async_token_provider:
                client_params["azure_ad_token_provider"] = (
                    self.azure_ad_async_token_provider
                )
            self.async_client = openai.AsyncAzureOpenAI(**client_params).embeddings  # type: ignore[arg-type, arg-type, arg-type, arg-type, arg-type, arg-type, arg-type, arg-type, arg-type]
        else:
            # Pre-v1 openai SDK: the module-level Embedding resource is used.
            self.client = openai.Embedding  # type: ignore[attr-defined]
        return self

    @property
    def _llm_type(self) -> str:
        # Identifier used by LangChain serialization/telemetry.
        return "azure-openai-chat"
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/embeddings/ernie.py | import asyncio
import logging
import threading
from typing import Dict, List, Optional
import requests
from langchain_core._api.deprecation import deprecated
from langchain_core.embeddings import Embeddings
from langchain_core.runnables.config import run_in_executor
from langchain_core.utils import get_from_dict_or_env, pre_init
from pydantic import BaseModel, ConfigDict
logger = logging.getLogger(__name__)
@deprecated(
    since="0.0.13",
    alternative="langchain_community.embeddings.QianfanEmbeddingsEndpoint",
)
class ErnieEmbeddings(BaseModel, Embeddings):
    """`Ernie Embeddings V1` embedding models."""

    ernie_api_base: Optional[str] = None
    ernie_client_id: Optional[str] = None
    ernie_client_secret: Optional[str] = None
    # OAuth access token; fetched lazily and refreshed on error 111.
    access_token: Optional[str] = None

    # Max number of texts sent per request.
    chunk_size: int = 16

    model_name: str = "ErnieBot-Embedding-V1"

    # Class-level lock shared by all instances; serializes token refreshes.
    _lock = threading.Lock()

    model_config = ConfigDict(protected_namespaces=())

    @pre_init
    def validate_environment(cls, values: Dict) -> Dict:
        """Resolve the API base and client credentials from args or env vars."""
        values["ernie_api_base"] = get_from_dict_or_env(
            values, "ernie_api_base", "ERNIE_API_BASE", "https://aip.baidubce.com"
        )
        values["ernie_client_id"] = get_from_dict_or_env(
            values,
            "ernie_client_id",
            "ERNIE_CLIENT_ID",
        )
        values["ernie_client_secret"] = get_from_dict_or_env(
            values,
            "ernie_client_secret",
            "ERNIE_CLIENT_SECRET",
        )
        return values

    def _embedding(self, json: object) -> dict:
        """POST one embedding request and return the parsed JSON response."""
        base_url = (
            f"{self.ernie_api_base}/rpc/2.0/ai_custom/v1/wenxinworkshop/embeddings"
        )
        resp = requests.post(
            f"{base_url}/embedding-v1",
            headers={
                "Content-Type": "application/json",
            },
            params={"access_token": self.access_token},
            json=json,
        )
        return resp.json()

    def _refresh_access_token_with_lock(self) -> None:
        """Fetch a new OAuth access token, serialized under the class lock."""
        with self._lock:
            logger.debug("Refreshing access token")
            base_url: str = f"{self.ernie_api_base}/oauth/2.0/token"
            resp = requests.post(
                base_url,
                headers={
                    "Content-Type": "application/json",
                    "Accept": "application/json",
                },
                params={
                    "grant_type": "client_credentials",
                    "client_id": self.ernie_client_id,
                    "client_secret": self.ernie_client_secret,
                },
            )
            self.access_token = str(resp.json().get("access_token"))

    def _embed_with_token_retry(self, payload: dict) -> dict:
        """Call the endpoint, refreshing the access token once on error 111.

        Args:
            payload: Request body, e.g. ``{"input": [...]}``.

        Returns:
            The successful response dict (contains a ``"data"`` list).

        Raises:
            ValueError: if the endpoint still reports an error after the
                single token-refresh retry. (Previously a failed retry fell
                through and crashed with an unrelated KeyError.)
        """
        resp = self._embedding(payload)
        if resp.get("error_code") == 111:
            # 111 signals an expired access token: refresh once and retry.
            self._refresh_access_token_with_lock()
            resp = self._embedding(payload)
        if resp.get("error_code"):
            raise ValueError(f"Error from Ernie: {resp}")
        return resp

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Embed search docs.

        Args:
            texts: The list of texts to embed

        Returns:
            List[List[float]]: List of embeddings, one for each text.
        """
        if not self.access_token:
            self._refresh_access_token_with_lock()
        text_in_chunks = [
            texts[i : i + self.chunk_size]
            for i in range(0, len(texts), self.chunk_size)
        ]
        lst: List[List[float]] = []
        for chunk in text_in_chunks:
            resp = self._embed_with_token_retry({"input": list(chunk)})
            lst.extend([i["embedding"] for i in resp["data"]])
        return lst

    def embed_query(self, text: str) -> List[float]:
        """Embed query text.

        Args:
            text: The text to embed.

        Returns:
            List[float]: Embeddings for the text.
        """
        if not self.access_token:
            self._refresh_access_token_with_lock()
        resp = self._embed_with_token_retry({"input": [text]})
        return resp["data"][0]["embedding"]

    async def aembed_query(self, text: str) -> List[float]:
        """Asynchronous Embed query text.

        Args:
            text: The text to embed.

        Returns:
            List[float]: Embeddings for the text.
        """
        # Sync implementation delegated to a thread executor.
        return await run_in_executor(None, self.embed_query, text)

    async def aembed_documents(self, texts: List[str]) -> List[List[float]]:
        """Asynchronous Embed search docs.

        Args:
            texts: The list of texts to embed

        Returns:
            List[List[float]]: List of embeddings, one for each text.
        """
        result = await asyncio.gather(*[self.aembed_query(text) for text in texts])
        return list(result)
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/embeddings/__init__.py | """**Embedding models** are wrappers around embedding models
from different APIs and services.
**Embedding models** can be LLMs or not.
**Class hierarchy:**
.. code-block::
Embeddings --> <name>Embeddings # Examples: OpenAIEmbeddings, HuggingFaceEmbeddings
"""
import importlib
import logging
from typing import TYPE_CHECKING, Any
if TYPE_CHECKING:
from langchain_community.embeddings.aleph_alpha import (
AlephAlphaAsymmetricSemanticEmbedding,
AlephAlphaSymmetricSemanticEmbedding,
)
from langchain_community.embeddings.anyscale import (
AnyscaleEmbeddings,
)
from langchain_community.embeddings.ascend import (
AscendEmbeddings,
)
from langchain_community.embeddings.awa import (
AwaEmbeddings,
)
from langchain_community.embeddings.azure_openai import (
AzureOpenAIEmbeddings,
)
from langchain_community.embeddings.baichuan import (
BaichuanTextEmbeddings,
)
from langchain_community.embeddings.baidu_qianfan_endpoint import (
QianfanEmbeddingsEndpoint,
)
from langchain_community.embeddings.bedrock import (
BedrockEmbeddings,
)
from langchain_community.embeddings.bookend import (
BookendEmbeddings,
)
from langchain_community.embeddings.clarifai import (
ClarifaiEmbeddings,
)
from langchain_community.embeddings.clova import (
ClovaEmbeddings,
)
from langchain_community.embeddings.cohere import (
CohereEmbeddings,
)
from langchain_community.embeddings.dashscope import (
DashScopeEmbeddings,
)
from langchain_community.embeddings.databricks import (
DatabricksEmbeddings,
)
from langchain_community.embeddings.deepinfra import (
DeepInfraEmbeddings,
)
from langchain_community.embeddings.edenai import (
EdenAiEmbeddings,
)
from langchain_community.embeddings.elasticsearch import (
ElasticsearchEmbeddings,
)
from langchain_community.embeddings.embaas import (
EmbaasEmbeddings,
)
from langchain_community.embeddings.ernie import (
ErnieEmbeddings,
)
from langchain_community.embeddings.fake import (
DeterministicFakeEmbedding,
FakeEmbeddings,
)
from langchain_community.embeddings.fastembed import (
FastEmbedEmbeddings,
)
from langchain_community.embeddings.gigachat import (
GigaChatEmbeddings,
)
from langchain_community.embeddings.google_palm import (
GooglePalmEmbeddings,
)
from langchain_community.embeddings.gpt4all import (
GPT4AllEmbeddings,
)
from langchain_community.embeddings.gradient_ai import (
GradientEmbeddings,
)
from langchain_community.embeddings.huggingface import (
HuggingFaceBgeEmbeddings,
HuggingFaceEmbeddings,
HuggingFaceInferenceAPIEmbeddings,
HuggingFaceInstructEmbeddings,
)
from langchain_community.embeddings.huggingface_hub import (
HuggingFaceHubEmbeddings,
)
from langchain_community.embeddings.infinity import (
InfinityEmbeddings,
)
from langchain_community.embeddings.infinity_local import (
InfinityEmbeddingsLocal,
)
from langchain_community.embeddings.ipex_llm import IpexLLMBgeEmbeddings
from langchain_community.embeddings.itrex import (
QuantizedBgeEmbeddings,
)
from langchain_community.embeddings.javelin_ai_gateway import (
JavelinAIGatewayEmbeddings,
)
from langchain_community.embeddings.jina import (
JinaEmbeddings,
)
from langchain_community.embeddings.johnsnowlabs import (
JohnSnowLabsEmbeddings,
)
from langchain_community.embeddings.laser import (
LaserEmbeddings,
)
from langchain_community.embeddings.llamacpp import (
LlamaCppEmbeddings,
)
from langchain_community.embeddings.llamafile import (
LlamafileEmbeddings,
)
from langchain_community.embeddings.llm_rails import (
LLMRailsEmbeddings,
)
from langchain_community.embeddings.localai import (
LocalAIEmbeddings,
)
from langchain_community.embeddings.minimax import (
MiniMaxEmbeddings,
)
from langchain_community.embeddings.mlflow import (
MlflowCohereEmbeddings,
MlflowEmbeddings,
)
from langchain_community.embeddings.mlflow_gateway import (
MlflowAIGatewayEmbeddings,
)
from langchain_community.embeddings.modelscope_hub import (
ModelScopeEmbeddings,
)
from langchain_community.embeddings.mosaicml import (
MosaicMLInstructorEmbeddings,
)
from langchain_community.embeddings.naver import (
ClovaXEmbeddings,
)
from langchain_community.embeddings.nemo import (
NeMoEmbeddings,
)
from langchain_community.embeddings.nlpcloud import (
NLPCloudEmbeddings,
)
from langchain_community.embeddings.oci_generative_ai import (
OCIGenAIEmbeddings,
)
from langchain_community.embeddings.octoai_embeddings import (
OctoAIEmbeddings,
)
from langchain_community.embeddings.ollama import (
OllamaEmbeddings,
)
from langchain_community.embeddings.openai import (
OpenAIEmbeddings,
)
from langchain_community.embeddings.openvino import (
OpenVINOBgeEmbeddings,
OpenVINOEmbeddings,
)
from langchain_community.embeddings.optimum_intel import (
QuantizedBiEncoderEmbeddings,
)
from langchain_community.embeddings.oracleai import (
OracleEmbeddings,
)
from langchain_community.embeddings.ovhcloud import (
OVHCloudEmbeddings,
)
from langchain_community.embeddings.premai import (
PremAIEmbeddings,
)
from langchain_community.embeddings.sagemaker_endpoint import (
SagemakerEndpointEmbeddings,
)
from langchain_community.embeddings.sambanova import (
SambaStudioEmbeddings,
)
from langchain_community.embeddings.self_hosted import (
SelfHostedEmbeddings,
)
from langchain_community.embeddings.self_hosted_hugging_face import (
SelfHostedHuggingFaceEmbeddings,
SelfHostedHuggingFaceInstructEmbeddings,
)
from langchain_community.embeddings.sentence_transformer import (
SentenceTransformerEmbeddings,
)
from langchain_community.embeddings.solar import (
SolarEmbeddings,
)
from langchain_community.embeddings.spacy_embeddings import (
SpacyEmbeddings,
)
from langchain_community.embeddings.sparkllm import (
SparkLLMTextEmbeddings,
)
from langchain_community.embeddings.tensorflow_hub import (
TensorflowHubEmbeddings,
)
from langchain_community.embeddings.textembed import (
TextEmbedEmbeddings,
)
from langchain_community.embeddings.titan_takeoff import (
TitanTakeoffEmbed,
)
from langchain_community.embeddings.vertexai import (
VertexAIEmbeddings,
)
from langchain_community.embeddings.volcengine import (
VolcanoEmbeddings,
)
from langchain_community.embeddings.voyageai import (
VoyageEmbeddings,
)
from langchain_community.embeddings.xinference import (
XinferenceEmbeddings,
)
from langchain_community.embeddings.yandex import (
YandexGPTEmbeddings,
)
from langchain_community.embeddings.zhipuai import (
ZhipuAIEmbeddings,
)
# Public API of the embeddings package; each name is importable lazily via
# __getattr__ below.
__all__ = [
    "AlephAlphaAsymmetricSemanticEmbedding",
    "AlephAlphaSymmetricSemanticEmbedding",
    "AnyscaleEmbeddings",
    "AscendEmbeddings",
    "AwaEmbeddings",
    "AzureOpenAIEmbeddings",
    "BaichuanTextEmbeddings",
    "BedrockEmbeddings",
    "BookendEmbeddings",
    "ClarifaiEmbeddings",
    "ClovaEmbeddings",
    "ClovaXEmbeddings",
    "CohereEmbeddings",
    "DashScopeEmbeddings",
    "DatabricksEmbeddings",
    "DeepInfraEmbeddings",
    "DeterministicFakeEmbedding",
    "EdenAiEmbeddings",
    "ElasticsearchEmbeddings",
    "EmbaasEmbeddings",
    "ErnieEmbeddings",
    "FakeEmbeddings",
    "FastEmbedEmbeddings",
    "GPT4AllEmbeddings",
    "GigaChatEmbeddings",
    "GooglePalmEmbeddings",
    "GradientEmbeddings",
    "HuggingFaceBgeEmbeddings",
    "HuggingFaceEmbeddings",
    "HuggingFaceHubEmbeddings",
    "HuggingFaceInferenceAPIEmbeddings",
    "HuggingFaceInstructEmbeddings",
    "InfinityEmbeddings",
    "InfinityEmbeddingsLocal",
    "IpexLLMBgeEmbeddings",
    "JavelinAIGatewayEmbeddings",
    "JinaEmbeddings",
    "JohnSnowLabsEmbeddings",
    "LLMRailsEmbeddings",
    "LaserEmbeddings",
    "LlamaCppEmbeddings",
    "LlamafileEmbeddings",
    "LocalAIEmbeddings",
    "MiniMaxEmbeddings",
    "MlflowAIGatewayEmbeddings",
    "MlflowCohereEmbeddings",
    "MlflowEmbeddings",
    "ModelScopeEmbeddings",
    "MosaicMLInstructorEmbeddings",
    "NLPCloudEmbeddings",
    "NeMoEmbeddings",
    "OCIGenAIEmbeddings",
    "OctoAIEmbeddings",
    "OllamaEmbeddings",
    "OpenAIEmbeddings",
    "OpenVINOBgeEmbeddings",
    "OpenVINOEmbeddings",
    "OracleEmbeddings",
    "OVHCloudEmbeddings",
    "PremAIEmbeddings",
    "QianfanEmbeddingsEndpoint",
    "QuantizedBgeEmbeddings",
    "QuantizedBiEncoderEmbeddings",
    "SagemakerEndpointEmbeddings",
    "SambaStudioEmbeddings",
    "SelfHostedEmbeddings",
    "SelfHostedHuggingFaceEmbeddings",
    "SelfHostedHuggingFaceInstructEmbeddings",
    "SentenceTransformerEmbeddings",
    "SolarEmbeddings",
    "SpacyEmbeddings",
    "SparkLLMTextEmbeddings",
    "TensorflowHubEmbeddings",
    "TextEmbedEmbeddings",
    "TitanTakeoffEmbed",
    "VertexAIEmbeddings",
    "VolcanoEmbeddings",
    "VoyageEmbeddings",
    "XinferenceEmbeddings",
    "YandexGPTEmbeddings",
    "ZhipuAIEmbeddings",
]

# Maps each public class name to its defining module, used by __getattr__ to
# import providers on first access (keeps `import langchain_community` cheap).
_module_lookup = {
    "AlephAlphaAsymmetricSemanticEmbedding": "langchain_community.embeddings.aleph_alpha",  # noqa: E501
    "AlephAlphaSymmetricSemanticEmbedding": "langchain_community.embeddings.aleph_alpha",  # noqa: E501
    "AnyscaleEmbeddings": "langchain_community.embeddings.anyscale",
    "AwaEmbeddings": "langchain_community.embeddings.awa",
    "AzureOpenAIEmbeddings": "langchain_community.embeddings.azure_openai",
    "BaichuanTextEmbeddings": "langchain_community.embeddings.baichuan",
    "BedrockEmbeddings": "langchain_community.embeddings.bedrock",
    "BookendEmbeddings": "langchain_community.embeddings.bookend",
    "ClarifaiEmbeddings": "langchain_community.embeddings.clarifai",
    "ClovaEmbeddings": "langchain_community.embeddings.clova",
    "ClovaXEmbeddings": "langchain_community.embeddings.naver",
    "CohereEmbeddings": "langchain_community.embeddings.cohere",
    "DashScopeEmbeddings": "langchain_community.embeddings.dashscope",
    "DatabricksEmbeddings": "langchain_community.embeddings.databricks",
    "DeepInfraEmbeddings": "langchain_community.embeddings.deepinfra",
    "DeterministicFakeEmbedding": "langchain_community.embeddings.fake",
    "EdenAiEmbeddings": "langchain_community.embeddings.edenai",
    "ElasticsearchEmbeddings": "langchain_community.embeddings.elasticsearch",
    "EmbaasEmbeddings": "langchain_community.embeddings.embaas",
    "ErnieEmbeddings": "langchain_community.embeddings.ernie",
    "FakeEmbeddings": "langchain_community.embeddings.fake",
    "FastEmbedEmbeddings": "langchain_community.embeddings.fastembed",
    "GPT4AllEmbeddings": "langchain_community.embeddings.gpt4all",
    "GooglePalmEmbeddings": "langchain_community.embeddings.google_palm",
    "GradientEmbeddings": "langchain_community.embeddings.gradient_ai",
    "GigaChatEmbeddings": "langchain_community.embeddings.gigachat",
    "HuggingFaceBgeEmbeddings": "langchain_community.embeddings.huggingface",
    "HuggingFaceEmbeddings": "langchain_community.embeddings.huggingface",
    "HuggingFaceHubEmbeddings": "langchain_community.embeddings.huggingface_hub",
    "HuggingFaceInferenceAPIEmbeddings": "langchain_community.embeddings.huggingface",
    "HuggingFaceInstructEmbeddings": "langchain_community.embeddings.huggingface",
    "InfinityEmbeddings": "langchain_community.embeddings.infinity",
    "InfinityEmbeddingsLocal": "langchain_community.embeddings.infinity_local",
    "IpexLLMBgeEmbeddings": "langchain_community.embeddings.ipex_llm",
    "JavelinAIGatewayEmbeddings": "langchain_community.embeddings.javelin_ai_gateway",
    "JinaEmbeddings": "langchain_community.embeddings.jina",
    "JohnSnowLabsEmbeddings": "langchain_community.embeddings.johnsnowlabs",
    "LLMRailsEmbeddings": "langchain_community.embeddings.llm_rails",
    "LaserEmbeddings": "langchain_community.embeddings.laser",
    "LlamaCppEmbeddings": "langchain_community.embeddings.llamacpp",
    "LlamafileEmbeddings": "langchain_community.embeddings.llamafile",
    "LocalAIEmbeddings": "langchain_community.embeddings.localai",
    "MiniMaxEmbeddings": "langchain_community.embeddings.minimax",
    "MlflowAIGatewayEmbeddings": "langchain_community.embeddings.mlflow_gateway",
    "MlflowCohereEmbeddings": "langchain_community.embeddings.mlflow",
    "MlflowEmbeddings": "langchain_community.embeddings.mlflow",
    "ModelScopeEmbeddings": "langchain_community.embeddings.modelscope_hub",
    "MosaicMLInstructorEmbeddings": "langchain_community.embeddings.mosaicml",
    "NLPCloudEmbeddings": "langchain_community.embeddings.nlpcloud",
    "NeMoEmbeddings": "langchain_community.embeddings.nemo",
    "OCIGenAIEmbeddings": "langchain_community.embeddings.oci_generative_ai",
    "OctoAIEmbeddings": "langchain_community.embeddings.octoai_embeddings",
    "OllamaEmbeddings": "langchain_community.embeddings.ollama",
    "OpenAIEmbeddings": "langchain_community.embeddings.openai",
    "OpenVINOEmbeddings": "langchain_community.embeddings.openvino",
    "OpenVINOBgeEmbeddings": "langchain_community.embeddings.openvino",
    "QianfanEmbeddingsEndpoint": "langchain_community.embeddings.baidu_qianfan_endpoint",  # noqa: E501
    "QuantizedBgeEmbeddings": "langchain_community.embeddings.itrex",
    "QuantizedBiEncoderEmbeddings": "langchain_community.embeddings.optimum_intel",
    "OracleEmbeddings": "langchain_community.embeddings.oracleai",
    "OVHCloudEmbeddings": "langchain_community.embeddings.ovhcloud",
    "SagemakerEndpointEmbeddings": "langchain_community.embeddings.sagemaker_endpoint",
    "SambaStudioEmbeddings": "langchain_community.embeddings.sambanova",
    "SelfHostedEmbeddings": "langchain_community.embeddings.self_hosted",
    "SelfHostedHuggingFaceEmbeddings": "langchain_community.embeddings.self_hosted_hugging_face",  # noqa: E501
    "SelfHostedHuggingFaceInstructEmbeddings": "langchain_community.embeddings.self_hosted_hugging_face",  # noqa: E501
    "SentenceTransformerEmbeddings": "langchain_community.embeddings.sentence_transformer",  # noqa: E501
    "SolarEmbeddings": "langchain_community.embeddings.solar",
    "SpacyEmbeddings": "langchain_community.embeddings.spacy_embeddings",
    "SparkLLMTextEmbeddings": "langchain_community.embeddings.sparkllm",
    "TensorflowHubEmbeddings": "langchain_community.embeddings.tensorflow_hub",
    "VertexAIEmbeddings": "langchain_community.embeddings.vertexai",
    "VolcanoEmbeddings": "langchain_community.embeddings.volcengine",
    "VoyageEmbeddings": "langchain_community.embeddings.voyageai",
    "XinferenceEmbeddings": "langchain_community.embeddings.xinference",
    "TextEmbedEmbeddings": "langchain_community.embeddings.textembed",
    "TitanTakeoffEmbed": "langchain_community.embeddings.titan_takeoff",
    "PremAIEmbeddings": "langchain_community.embeddings.premai",
    "YandexGPTEmbeddings": "langchain_community.embeddings.yandex",
    "AscendEmbeddings": "langchain_community.embeddings.ascend",
    "ZhipuAIEmbeddings": "langchain_community.embeddings.zhipuai",
}
def __getattr__(name: str) -> Any:
    """Lazily import ``name`` from its home module (PEP 562 hook).

    Looks the attribute up in ``_module_lookup``; unknown names raise
    ``AttributeError`` as the module ``__getattr__`` protocol requires.
    """
    module_path = _module_lookup.get(name)
    if module_path is None:
        raise AttributeError(f"module {__name__} has no attribute {name}")
    return getattr(importlib.import_module(module_path), name)
logger = logging.getLogger(__name__)
# TODO: this is in here to maintain backwards compatibility
class HypotheticalDocumentEmbedder:
    """Deprecated shim for ``langchain.chains.HypotheticalDocumentEmbedder``.

    Instantiating this class (or calling :meth:`from_llm`) warns and returns
    an instance of the real implementation, which now lives in
    ``langchain.chains.hyde.base``.
    """

    def __new__(cls, *args: Any, **kwargs: Any) -> Any:
        # BUG FIX: the previous implementation ``return``-ed the substitute
        # object from ``__init__``, which raises
        # ``TypeError: __init__() should return None`` at call time.
        # ``__new__`` is the correct hook for returning a different object.
        logger.warning(
            "Using a deprecated class. Please use "
            "`from langchain.chains import HypotheticalDocumentEmbedder` instead"
        )
        from langchain.chains.hyde.base import HypotheticalDocumentEmbedder as H

        return H(*args, **kwargs)

    @classmethod
    def from_llm(cls, *args: Any, **kwargs: Any) -> Any:
        """Forward to the relocated implementation's ``from_llm``."""
        logger.warning(
            "Using a deprecated class. Please use "
            "`from langchain.chains import HypotheticalDocumentEmbedder` instead"
        )
        from langchain.chains.hyde.base import HypotheticalDocumentEmbedder as H

        return H.from_llm(*args, **kwargs)
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/embeddings/mlflow.py | from __future__ import annotations
from typing import Any, Dict, Iterator, List
from urllib.parse import urlparse
from langchain_core.embeddings import Embeddings
from pydantic import BaseModel, PrivateAttr
def _chunk(texts: List[str], size: int) -> Iterator[List[str]]:
for i in range(0, len(texts), size):
yield texts[i : i + size]
class MlflowEmbeddings(Embeddings, BaseModel):
    """Embedding LLMs served through an MLflow deployments endpoint.

    To use, you should have the `mlflow[genai]` python package installed.
    For more information, see https://mlflow.org/docs/latest/llms/deployments.

    Example:
        .. code-block:: python

            from langchain_community.embeddings import MlflowEmbeddings

            embeddings = MlflowEmbeddings(
                target_uri="http://localhost:5000",
                endpoint="embeddings",
            )
    """

    endpoint: str
    """The endpoint to use."""

    target_uri: str
    """The target URI to use."""

    _client: Any = PrivateAttr()

    """The parameters to use for queries."""
    query_params: Dict[str, str] = {}

    """The parameters to use for documents."""
    documents_params: Dict[str, str] = {}

    def __init__(self, **kwargs: Any):
        super().__init__(**kwargs)
        self._validate_uri()
        try:
            from mlflow.deployments import get_deploy_client

            self._client = get_deploy_client(self.target_uri)
        except ImportError as e:
            raise ImportError(
                "Failed to create the client. "
                f"Please run `pip install mlflow{self._mlflow_extras}` to install "
                "required dependencies."
            ) from e

    @property
    def _mlflow_extras(self) -> str:
        # Extras suffix used in the install hint above.
        return "[genai]"

    def _validate_uri(self) -> None:
        """Reject target URIs whose scheme is not supported."""
        # Bare "databricks" (no URI scheme) is accepted as-is.
        if self.target_uri == "databricks":
            return
        allowed = ["http", "https", "databricks"]
        scheme = urlparse(self.target_uri).scheme
        if scheme in allowed:
            return
        raise ValueError(
            f"Invalid target URI: {self.target_uri}. "
            f"The scheme must be one of {allowed}."
        )

    def embed(self, texts: List[str], params: Dict[str, str]) -> List[List[float]]:
        """Embed ``texts`` in batches of 20, forwarding ``params`` verbatim."""
        vectors: List[List[float]] = []
        for batch in _chunk(texts, 20):
            resp = self._client.predict(
                endpoint=self.endpoint,
                inputs={"input": batch, **params},  # type: ignore[arg-type]
            )
            vectors.extend(item["embedding"] for item in resp["data"])
        return vectors

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Embed search documents using ``documents_params``."""
        return self.embed(texts, params=self.documents_params)

    def embed_query(self, text: str) -> List[float]:
        """Embed a single query string using ``query_params``."""
        return self.embed([text], params=self.query_params)[0]
class MlflowCohereEmbeddings(MlflowEmbeddings):
    """Cohere embedding LLMs in MLflow."""

    # Cohere embedding models require an ``input_type`` hint distinguishing
    # queries from documents; these defaults are forwarded with every request
    # by the inherited ``embed_query`` / ``embed_documents``.
    query_params: Dict[str, str] = {"input_type": "search_query"}

    documents_params: Dict[str, str] = {"input_type": "search_document"}
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/embeddings/mlflow_gateway.py | from __future__ import annotations
import warnings
from typing import Any, Iterator, List, Optional
from langchain_core.embeddings import Embeddings
from pydantic import BaseModel
def _chunk(texts: List[str], size: int) -> Iterator[List[str]]:
for i in range(0, len(texts), size):
yield texts[i : i + size]
class MlflowAIGatewayEmbeddings(Embeddings, BaseModel):
    """MLflow AI Gateway embeddings (deprecated).

    To use, you should have the ``mlflow[gateway]`` python package installed.
    For more information, see https://mlflow.org/docs/latest/gateway/index.html.

    Example:
        .. code-block:: python

            from langchain_community.embeddings import MlflowAIGatewayEmbeddings

            embeddings = MlflowAIGatewayEmbeddings(
                gateway_uri="<your-mlflow-ai-gateway-uri>",
                route="<your-mlflow-ai-gateway-embeddings-route>"
            )
    """

    route: str
    """The route to use for the MLflow AI Gateway API."""

    gateway_uri: Optional[str] = None
    """The URI for the MLflow AI Gateway API."""

    def __init__(self, **kwargs: Any):
        warnings.warn(
            "`MlflowAIGatewayEmbeddings` is deprecated. Use `MlflowEmbeddings` or "
            "`DatabricksEmbeddings` instead.",
            DeprecationWarning,
        )
        try:
            import mlflow.gateway
        except ImportError as e:
            raise ImportError(
                "Could not import `mlflow.gateway` module. "
                "Please install it with `pip install mlflow[gateway]`."
            ) from e
        super().__init__(**kwargs)
        if self.gateway_uri:
            mlflow.gateway.set_gateway_uri(self.gateway_uri)

    def _query(self, texts: List[str]) -> List[List[float]]:
        """Send ``texts`` to the gateway route in batches of 20."""
        try:
            import mlflow.gateway
        except ImportError as e:
            raise ImportError(
                "Could not import `mlflow.gateway` module. "
                "Please install it with `pip install mlflow[gateway]`."
            ) from e
        collected: List[List[float]] = []
        for piece in _chunk(texts, 20):
            resp = mlflow.gateway.query(self.route, data={"text": piece})
            payload = resp["embeddings"]
            if isinstance(payload[0], List):
                # One embedding vector per input text.
                collected.extend(payload)
            else:
                # A single flat embedding vector.
                collected.append(payload)
        return collected

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Embed search documents."""
        return self._query(texts)

    def embed_query(self, text: str) -> List[float]:
        """Embed a single query string."""
        return self._query([text])[0]
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/embeddings/awa.py | from typing import Any, Dict, List
from langchain_core.embeddings import Embeddings
from pydantic import BaseModel, model_validator
class AwaEmbeddings(BaseModel, Embeddings):
    """Embed documents and queries with Awa DB.

    Attributes:
        client: The AwaEmbedding client.
        model: The name of the model used for embedding.
            Default is "all-mpnet-base-v2".
    """

    client: Any  #: :meta private:
    model: str = "all-mpnet-base-v2"

    @model_validator(mode="before")
    @classmethod
    def validate_environment(cls, values: Dict) -> Any:
        """Ensure the optional ``awadb`` dependency is importable."""
        try:
            from awadb import AwaEmbedding
        except ImportError as err:
            raise ImportError(
                "Could not import awadb library. "
                "Please install it with `pip install awadb`"
            ) from err
        values["client"] = AwaEmbedding()
        return values

    def set_model(self, model_name: str) -> None:
        """Switch both this wrapper and the underlying client to ``model_name``.

        Args:
            model_name: A string which represents the name of model.
        """
        self.client.model_name = model_name
        self.model = model_name

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Embed a batch of texts with ``AwaEmbedding.EmbeddingBatch``.

        Args:
            texts: The list of texts to be embedded.

        Returns:
            List of embeddings, one for each text.
        """
        return self.client.EmbeddingBatch(texts)

    def embed_query(self, text: str) -> List[float]:
        """Embed a single text with ``AwaEmbedding.Embedding``.

        Args:
            text: The text to embed.

        Returns:
            Embeddings for the text.
        """
        return self.client.Embedding(text)
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/embeddings/volcengine.py | from __future__ import annotations
import logging
from typing import Any, Dict, List, Optional
from langchain_core.embeddings import Embeddings
from langchain_core.utils import get_from_dict_or_env, pre_init
from pydantic import BaseModel
logger = logging.getLogger(__name__)
class VolcanoEmbeddings(BaseModel, Embeddings):
    """`Volcengine Embeddings` embedding models."""

    volcano_ak: Optional[str] = None
    """volcano access key
    learn more from: https://www.volcengine.com/docs/6459/76491#ak-sk"""

    volcano_sk: Optional[str] = None
    """volcano secret key
    learn more from: https://www.volcengine.com/docs/6459/76491#ak-sk"""

    host: str = "maas-api.ml-platform-cn-beijing.volces.com"
    """host
    learn more from https://www.volcengine.com/docs/82379/1174746"""

    region: str = "cn-beijing"
    """region
    learn more from https://www.volcengine.com/docs/82379/1174746"""

    model: str = "bge-large-zh"
    """Model name
    you could get from https://www.volcengine.com/docs/82379/1174746
    for now, we support bge_large_zh
    """

    version: str = "1.0"
    """ model version """

    chunk_size: int = 100
    """Chunk size when multiple texts are input"""

    client: Any
    """volcano client"""

    @pre_init
    def validate_environment(cls, values: Dict) -> Dict:
        """
        Validate whether volcano_ak and volcano_sk in the environment variables or
        configuration file are available or not.

        init volcano embedding client with `ak`, `sk`, `host`, `region`

        Args:
            values: a dictionary containing configuration information, must include the
                fields of volcano_ak and volcano_sk

        Returns:
            a dictionary containing configuration information. If volcano_ak and
            volcano_sk are not provided in the environment variables or configuration
            file, the original values will be returned; otherwise, values containing
            volcano_ak and volcano_sk will be returned.

        Raises:
            ImportError: volcengine package not found, please install it with
                `pip install volcengine`
        """
        values["volcano_ak"] = get_from_dict_or_env(
            values,
            "volcano_ak",
            "VOLC_ACCESSKEY",
        )
        values["volcano_sk"] = get_from_dict_or_env(
            values,
            "volcano_sk",
            "VOLC_SECRETKEY",
        )

        try:
            from volcengine.maas import MaasService
        except ImportError as e:
            # Chain the original ImportError so the root cause is visible.
            raise ImportError(
                "volcengine package not found, please install it with "
                "`pip install volcengine`"
            ) from e

        client = MaasService(values["host"], values["region"])
        client.set_ak(values["volcano_ak"])
        client.set_sk(values["volcano_sk"])
        values["client"] = client
        return values

    def embed_query(self, text: str) -> List[float]:
        """Embed a single query string (delegates to :meth:`embed_documents`)."""
        return self.embed_documents([text])[0]

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """
        Embed a list of text documents via the Volcengine MaaS embeddings API.

        Args:
            texts (List[str]): A list of text documents to embed.

        Returns:
            List[List[float]]: A list of embeddings for each document in the input
                list. Each embedding is represented as a list of float values.

        Raises:
            ValueError: if the Volcengine service reports an error
                (wraps the underlying ``MaasException``).
        """
        # Import once, up front — the previous version re-imported the
        # exception type inside the loop for every chunk.
        from volcengine.maas import MaasException

        embeddings: List[List[float]] = []
        for i in range(0, len(texts), self.chunk_size):
            chunk = texts[i : i + self.chunk_size]
            req = {
                "model": {
                    "name": self.model,
                    "version": self.version,
                },
                "input": chunk,
            }
            try:
                resp = self.client.embeddings(req)
            except MaasException as e:
                # Chain the cause so callers can inspect the service error.
                raise ValueError(f"embed by volcengine Error: {e}") from e
            embeddings.extend(res["embedding"] for res in resp["data"])
        return embeddings
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/embeddings/elasticsearch.py | from __future__ import annotations
from typing import TYPE_CHECKING, List, Optional
from langchain_core._api import deprecated
from langchain_core.utils import get_from_env
if TYPE_CHECKING:
from elasticsearch import Elasticsearch
from elasticsearch.client import MlClient
from langchain_core.embeddings import Embeddings
@deprecated(
    "0.1.11", alternative="Use class in langchain-elasticsearch package", pending=True
)
class ElasticsearchEmbeddings(Embeddings):
    """Elasticsearch embedding models.

    This class provides an interface to generate embeddings using a model deployed
    in an Elasticsearch cluster. It requires an Elasticsearch connection object
    and the model_id of the model deployed in the cluster.

    In Elasticsearch you need to have an embedding model loaded and deployed.
    - https://www.elastic.co/guide/en/elasticsearch/reference/current/infer-trained-model.html
    - https://www.elastic.co/guide/en/machine-learning/current/ml-nlp-deploy-models.html
    """

    def __init__(
        self,
        client: MlClient,
        model_id: str,
        *,
        input_field: str = "text_field",
    ):
        """
        Initialize the ElasticsearchEmbeddings instance.

        Args:
            client (MlClient): An Elasticsearch ML client object.
            model_id (str): The model_id of the model deployed in the Elasticsearch
                cluster.
            input_field (str): The name of the key for the input text field in the
                document. Defaults to 'text_field'.
        """
        self.client = client
        self.model_id = model_id
        self.input_field = input_field

    @classmethod
    def from_credentials(
        cls,
        model_id: str,
        *,
        es_cloud_id: Optional[str] = None,
        es_user: Optional[str] = None,
        es_password: Optional[str] = None,
        input_field: str = "text_field",
    ) -> ElasticsearchEmbeddings:
        """Instantiate embeddings from Elasticsearch credentials.

        Args:
            model_id (str): The model_id of the model deployed in the Elasticsearch
                cluster.
            input_field (str): The name of the key for the input text field in the
                document. Defaults to 'text_field'.
            es_cloud_id: (str, optional): The Elasticsearch cloud ID to connect to.
            es_user: (str, optional): Elasticsearch username.
            es_password: (str, optional): Elasticsearch password.

        Example:
            .. code-block:: python

                from langchain_community.embeddings import ElasticsearchEmbeddings

                # Define the model ID and input field name (if different from default)
                model_id = "your_model_id"
                # Optional, only if different from 'text_field'
                input_field = "your_input_field"

                # Credentials can be passed in two ways. Either set the env vars
                # ES_CLOUD_ID, ES_USER, ES_PASSWORD and they will be automatically
                # pulled in, or pass them in directly as kwargs.
                embeddings = ElasticsearchEmbeddings.from_credentials(
                    model_id,
                    input_field=input_field,
                    # es_cloud_id="foo",
                    # es_user="bar",
                    # es_password="baz",
                )

                documents = [
                    "This is an example document.",
                    "Another example document to generate embeddings for.",
                ]
                embeddings.embed_documents(documents)
        """
        try:
            from elasticsearch import Elasticsearch
            from elasticsearch.client import MlClient
        except ImportError:
            raise ImportError(
                "elasticsearch package not found, please install with 'pip install "
                "elasticsearch'"
            )

        # Fall back to environment variables for any credential not passed in.
        es_cloud_id = es_cloud_id or get_from_env("es_cloud_id", "ES_CLOUD_ID")
        es_user = es_user or get_from_env("es_user", "ES_USER")
        es_password = es_password or get_from_env("es_password", "ES_PASSWORD")

        # Connect to Elasticsearch
        es_connection = Elasticsearch(
            cloud_id=es_cloud_id, basic_auth=(es_user, es_password)
        )
        client = MlClient(es_connection)
        return cls(client, model_id, input_field=input_field)

    @classmethod
    def from_es_connection(
        cls,
        model_id: str,
        es_connection: Elasticsearch,
        input_field: str = "text_field",
    ) -> ElasticsearchEmbeddings:
        """
        Instantiate embeddings from an existing Elasticsearch connection.

        This method provides a way to create an instance of the ElasticsearchEmbeddings
        class using an existing Elasticsearch connection. The connection object is used
        to create an MlClient, which is then used to initialize the
        ElasticsearchEmbeddings instance.

        Args:
            model_id (str): The model_id of the model deployed in the Elasticsearch
                cluster.
            es_connection (elasticsearch.Elasticsearch): An existing Elasticsearch
                connection object.
            input_field (str, optional): The name of the key for the input text
                field in the document. Defaults to 'text_field'.

        Returns:
            ElasticsearchEmbeddings: An instance of the ElasticsearchEmbeddings class.

        Example:
            .. code-block:: python

                from elasticsearch import Elasticsearch

                from langchain_community.embeddings import ElasticsearchEmbeddings

                # Define the model ID and input field name (if different from default)
                model_id = "your_model_id"
                # Optional, only if different from 'text_field'
                input_field = "your_input_field"

                # Create Elasticsearch connection
                es_connection = Elasticsearch(
                    hosts=["localhost:9200"], http_auth=("user", "password")
                )

                # Instantiate ElasticsearchEmbeddings using the existing connection
                embeddings = ElasticsearchEmbeddings.from_es_connection(
                    model_id,
                    es_connection,
                    input_field=input_field,
                )

                documents = [
                    "This is an example document.",
                    "Another example document to generate embeddings for.",
                ]
                embeddings.embed_documents(documents)
        """
        # Importing MlClient from elasticsearch.client within the method to
        # avoid unnecessary import if the method is not used
        from elasticsearch.client import MlClient

        # Create an MlClient from the given Elasticsearch connection
        client = MlClient(es_connection)

        # Return a new instance of the ElasticsearchEmbeddings class with
        # the MlClient, model_id, and input_field
        return cls(client, model_id, input_field=input_field)

    def _embedding_func(self, texts: List[str]) -> List[List[float]]:
        """
        Generate embeddings for the given texts using the Elasticsearch model.

        Args:
            texts (List[str]): A list of text strings to generate embeddings for.

        Returns:
            List[List[float]]: A list of embeddings, one for each text in the input
                list.
        """
        # One inference call for the whole batch; each doc wraps a text under
        # the configured input field name.
        response = self.client.infer_trained_model(
            model_id=self.model_id, docs=[{self.input_field: text} for text in texts]
        )

        embeddings = [doc["predicted_value"] for doc in response["inference_results"]]
        return embeddings

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """
        Generate embeddings for a list of documents.

        Args:
            texts (List[str]): A list of document text strings to generate embeddings
                for.

        Returns:
            List[List[float]]: A list of embeddings, one for each document in the input
                list.
        """
        return self._embedding_func(texts)

    def embed_query(self, text: str) -> List[float]:
        """
        Generate an embedding for a single query text.

        Args:
            text (str): The query text to generate an embedding for.

        Returns:
            List[float]: The embedding for the input query text.
        """
        return self._embedding_func([text])[0]
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/embeddings/nemo.py | from __future__ import annotations
import asyncio
import json
from typing import Any, Dict, List, Optional
import aiohttp
import requests
from langchain_core._api.deprecation import deprecated
from langchain_core.embeddings import Embeddings
from langchain_core.utils import pre_init
from pydantic import BaseModel
def is_endpoint_live(url: str, headers: Optional[dict], payload: Any) -> bool:
    """
    Check if an endpoint is live by sending a POST request to the specified URL.

    (The previous docstring claimed a GET request; the code has always sent
    a POST, since embedding endpoints typically only accept POST.)

    Args:
        url (str): The URL of the endpoint to check.
        headers (Optional[dict]): HTTP headers to send with the probe request.
        payload (Any): Request body the endpoint expects (e.g. a JSON string).

    Returns:
        bool: True if the endpoint is live (status code 200).

    Raises:
        Exception: If the endpoint returns a non-successful status code or if
            there is an error querying the endpoint.
    """
    try:
        response = requests.request("POST", url, headers=headers, data=payload)
    except requests.exceptions.RequestException as e:
        # Connection errors, timeouts, invalid URLs, etc.
        # Chain the cause so the original failure stays visible.
        raise Exception(f"Error querying the endpoint: {e}") from e

    # Check if the status code is 200 (OK)
    if response.status_code == 200:
        return True
    # Raise an exception if the status code is not 200
    raise Exception(
        f"Endpoint returned a non-successful status code: {response.status_code}"
    )
@deprecated(
    since="0.0.37",
    removal="1.0.0",
    message=(
        "Directly instantiating a NeMoEmbeddings from langchain-community is "
        "deprecated. Please use langchain-nvidia-ai-endpoints NVIDIAEmbeddings "
        "interface."
    ),
)
class NeMoEmbeddings(BaseModel, Embeddings):
    """NeMo embedding models.

    Talks to a NeMo Retriever embedding service over its ``/v1/embeddings``
    HTTP endpoint, both synchronously (``requests``) and asynchronously
    (``aiohttp``).
    """

    batch_size: int = 16  # number of texts embedded concurrently per async batch
    model: str = "NV-Embed-QA-003"  # model name sent with every request
    api_endpoint_url: str = "http://localhost:8088/v1/embeddings"

    @pre_init
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that the end point is alive using the values that are provided."""
        url = values["api_endpoint_url"]
        model = values["model"]

        # A minimal test payload and headers required by the endpoint.
        headers = {"Content-Type": "application/json"}
        payload = json.dumps(
            {"input": "Hello World", "model": model, "input_type": "query"}
        )

        # Raises if the endpoint is unreachable or unhealthy.
        is_endpoint_live(url, headers, payload)

        return values

    async def _aembedding_func(
        self, session: Any, text: str, input_type: str
    ) -> List[float]:
        """Async call out to the embedding endpoint.

        Args:
            session: An open ``aiohttp.ClientSession``.
            text: The text to embed.
            input_type: Either ``"query"`` or ``"passage"``.

        Returns:
            Embeddings for the text.
        """
        headers = {"Content-Type": "application/json"}

        async with session.post(
            self.api_endpoint_url,
            json={"input": text, "model": self.model, "input_type": input_type},
            headers=headers,
        ) as response:
            response.raise_for_status()
            answer = await response.text()
            answer = json.loads(answer)
            return answer["data"][0]["embedding"]

    def _embedding_func(self, text: str, input_type: str) -> List[float]:
        """Synchronous call out to the NeMo embedding endpoint.

        Args:
            text: The text to embed.
            input_type: Either ``"query"`` or ``"passage"``.

        Returns:
            Embeddings for the text.
        """
        payload = json.dumps(
            {"input": text, "model": self.model, "input_type": input_type}
        )
        headers = {"Content-Type": "application/json"}

        response = requests.request(
            "POST", self.api_endpoint_url, headers=headers, data=payload
        )
        response_json = json.loads(response.text)
        embedding = response_json["data"][0]["embedding"]

        return embedding

    def embed_documents(self, documents: List[str]) -> List[List[float]]:
        """Embed a list of document texts.

        Args:
            documents: The list of texts to embed.

        Returns:
            List of embeddings, one for each text.
        """
        return [self._embedding_func(text, input_type="passage") for text in documents]

    def embed_query(self, text: str) -> List[float]:
        """Embed a single query string."""
        return self._embedding_func(text, input_type="query")

    async def aembed_query(self, text: str) -> List[float]:
        """Call out to NeMo's embedding endpoint async for embedding query text.

        Args:
            text: The text to embed.

        Returns:
            Embedding for the text.
        """
        async with aiohttp.ClientSession() as session:
            # BUG FIX: previously sent input_type="passage"; queries must use
            # "query" to match the synchronous embed_query.
            return await self._aembedding_func(session, text, "query")

    async def aembed_documents(self, texts: List[str]) -> List[List[float]]:
        """Call out to NeMo's embedding endpoint async for embedding search docs.

        Args:
            texts: The list of texts to embed.

        Returns:
            List of embeddings, one for each text.
        """
        # BUG FIX: the previous implementation nested a `for text in text_batch`
        # loop around the gather of the whole batch, so every batch was
        # embedded batch_size times and the result list held duplicates.
        embeddings: List[List[float]] = []
        async with aiohttp.ClientSession() as session:
            for start in range(0, len(texts), self.batch_size):
                text_batch = texts[start : start + self.batch_size]
                # Embed the whole batch concurrently, exactly once.
                tasks = [
                    self._aembedding_func(session, text, "passage")
                    for text in text_batch
                ]
                embeddings.extend(await asyncio.gather(*tasks))
        return embeddings
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/embeddings/clova.py | from __future__ import annotations
from typing import Any, Dict, List, Optional, cast
import requests
from langchain_core._api.deprecation import deprecated
from langchain_core.embeddings import Embeddings
from langchain_core.utils import convert_to_secret_str, get_from_dict_or_env
from pydantic import BaseModel, ConfigDict, SecretStr, model_validator
@deprecated(
    since="0.3.4",
    removal="1.0.0",
    alternative_import="langchain_community.ClovaXEmbeddings",
)
class ClovaEmbeddings(BaseModel, Embeddings):
    """
    Clova's embedding service.

    To use this service,
    you should have the following environment variables
    set with your API tokens and application ID,
    or pass them as named parameters to the constructor:

    - ``CLOVA_EMB_API_KEY``: API key for accessing Clova's embedding service.
    - ``CLOVA_EMB_APIGW_API_KEY``: API gateway key for enhanced security.
    - ``CLOVA_EMB_APP_ID``: Application ID for identifying your application.

    Example:
        .. code-block:: python

            from langchain_community.embeddings import ClovaEmbeddings

            embeddings = ClovaEmbeddings(
                clova_emb_api_key='your_clova_emb_api_key',
                clova_emb_apigw_api_key='your_clova_emb_apigw_api_key',
                app_id='your_app_id'
            )

            query_text = "This is a test query."
            query_result = embeddings.embed_query(query_text)

            document_text = "This is a test document."
            document_result = embeddings.embed_documents([document_text])
    """

    endpoint_url: str = (
        "https://clovastudio.apigw.ntruss.com/testapp/v1/api-tools/embedding"
    )
    """Endpoint URL to use."""

    model: str = "clir-emb-dolphin"
    """Embedding model name to use."""

    clova_emb_api_key: Optional[SecretStr] = None
    """API key for accessing Clova's embedding service."""

    clova_emb_apigw_api_key: Optional[SecretStr] = None
    """API gateway key for enhanced security."""

    app_id: Optional[SecretStr] = None
    """Application ID for identifying your application."""

    model_config = ConfigDict(
        extra="forbid",
    )

    @model_validator(mode="before")
    @classmethod
    def validate_environment(cls, values: Dict) -> Any:
        """Resolve each credential from kwargs or its environment variable."""
        for field, env_var in (
            ("clova_emb_api_key", "CLOVA_EMB_API_KEY"),
            ("clova_emb_apigw_api_key", "CLOVA_EMB_APIGW_API_KEY"),
            ("app_id", "CLOVA_EMB_APP_ID"),
        ):
            values[field] = convert_to_secret_str(
                get_from_dict_or_env(values, field, env_var)
            )
        return values

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """
        Embed a list of texts and return their embeddings.

        Args:
            texts: The list of texts to embed.

        Returns:
            List of embeddings, one for each text.
        """
        return [self._embed_text(text) for text in texts]

    def embed_query(self, text: str) -> List[float]:
        """
        Embed a single query text and return its embedding.

        Args:
            text: The text to embed.

        Returns:
            Embeddings for the text.
        """
        return self._embed_text(text)

    def _embed_text(self, text: str) -> List[float]:
        """
        POST ``text`` to the embedding endpoint and return the vector,
        raising ``ValueError`` on any unexpected response.
        """
        # HTTP headers for authorization.
        headers = {
            "X-NCP-CLOVASTUDIO-API-KEY": cast(
                SecretStr, self.clova_emb_api_key
            ).get_secret_value(),
            "X-NCP-APIGW-API-KEY": cast(
                SecretStr, self.clova_emb_apigw_api_key
            ).get_secret_value(),
            "Content-Type": "application/json",
        }
        app_id = cast(SecretStr, self.app_id).get_secret_value()
        url = f"{self.endpoint_url}/{self.model}/{app_id}"

        # Send the request.
        response = requests.post(url, headers=headers, json={"text": text})

        # Only a 200 with a well-formed body yields an embedding.
        if response.status_code == 200:
            body = response.json()
            if "result" in body and "embedding" in body["result"]:
                return body["result"]["embedding"]
        raise ValueError(
            f"API request failed with status {response.status_code}: {response.text}"
        )
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/embeddings/octoai_embeddings.py | from typing import Dict, Optional
from langchain_core.utils import convert_to_secret_str, get_from_dict_or_env, pre_init
from pydantic import Field, SecretStr
from langchain_community.embeddings.openai import OpenAIEmbeddings
from langchain_community.utils.openai import is_openai_v1
DEFAULT_API_BASE = "https://text.octoai.run/v1/"
DEFAULT_MODEL = "thenlper/gte-large"
class OctoAIEmbeddings(OpenAIEmbeddings):
    """OctoAI Compute Service embedding models.

    See https://octo.ai/ for information about OctoAI.

    To use, you should have the ``openai`` python package installed and the
    environment variable ``OCTOAI_API_TOKEN`` set with your API token.
    Alternatively, you can use the octoai_api_token keyword argument.
    """

    octoai_api_token: Optional[SecretStr] = Field(default=None)
    """OctoAI Endpoints API keys."""
    endpoint_url: str = Field(default=DEFAULT_API_BASE)
    """Base URL path for API requests."""
    model: str = Field(default=DEFAULT_MODEL)
    """Model name to use."""
    tiktoken_enabled: bool = False
    """Set this to False for non-OpenAI implementations of the embeddings API"""

    @property
    def _llm_type(self) -> str:
        """Return type of embeddings model."""
        return "octoai-embeddings"

    @property
    def lc_secrets(self) -> Dict[str, str]:
        # Maps the constructor field to its environment variable for
        # secret-aware serialization.
        return {"octoai_api_token": "OCTOAI_API_TOKEN"}

    @pre_init
    def validate_environment(cls, values: dict) -> dict:
        """Resolve configuration from the environment and build the client(s)."""
        values["octoai_api_token"] = convert_to_secret_str(
            get_from_dict_or_env(values, "octoai_api_token", "OCTOAI_API_TOKEN")
        )
        values["endpoint_url"] = get_from_dict_or_env(
            values, "endpoint_url", "ENDPOINT_URL", default=DEFAULT_API_BASE
        )
        values["model"] = get_from_dict_or_env(
            values, "model", "MODEL", default=DEFAULT_MODEL
        )
        try:
            import openai

            if is_openai_v1():
                client_params = {
                    "api_key": values["octoai_api_token"].get_secret_value(),
                    "base_url": values["endpoint_url"],
                }
                # Only build clients that weren't supplied by the caller.
                if not values.get("client"):
                    values["client"] = openai.OpenAI(**client_params).embeddings
                if not values.get("async_client"):
                    values["async_client"] = openai.AsyncOpenAI(
                        **client_params
                    ).embeddings
            else:
                # Legacy (pre-1.0) openai SDK: configure module-level globals.
                values["openai_api_base"] = values["endpoint_url"]
                values["openai_api_key"] = values["octoai_api_token"].get_secret_value()
                values["client"] = openai.Embedding  # type: ignore[attr-defined]
                values["async_client"] = openai.Embedding  # type: ignore[attr-defined]
        except ImportError:
            raise ImportError(
                "Could not import openai python package. "
                "Please install it with `pip install openai`."
            )
        return values
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/embeddings/textembed.py | """
TextEmbed: Embedding Inference Server
TextEmbed provides a high-throughput, low-latency solution for serving embeddings.
It supports various sentence-transformer models.
Now, it includes the ability to deploy image embedding models.
TextEmbed offers flexibility and scalability for diverse applications.
TextEmbed is maintained by Keval Dekivadiya and is licensed under the Apache-2.0 license.
""" # noqa: E501
import asyncio
from concurrent.futures import ThreadPoolExecutor
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import aiohttp
import numpy as np
import requests
from langchain_core.embeddings import Embeddings
from langchain_core.utils import from_env, secret_from_env
from pydantic import BaseModel, ConfigDict, Field, SecretStr, model_validator
from typing_extensions import Self
__all__ = ["TextEmbedEmbeddings"]
class TextEmbedEmbeddings(BaseModel, Embeddings):
    """
    LangChain embeddings backed by a TextEmbed inference server.

    Attributes:
        model : The TextEmbed model ID to use for embeddings.
        api_url : The base URL for the TextEmbed API.
        api_key : The API key for authenticating with the TextEmbed API.
        client : The TextEmbed client instance.

    Example:
        .. code-block:: python

            from langchain_community.embeddings import TextEmbedEmbeddings

            embeddings = TextEmbedEmbeddings(
                model="sentence-transformers/clip-ViT-B-32",
                api_url="http://localhost:8000/v1",
                api_key="<API_KEY>"
            )

    For more information: https://github.com/kevaldekivadiya2415/textembed/blob/main/docs/setup.md
    """  # noqa: E501

    model: str
    """Underlying TextEmbed model id."""

    api_url: str = Field(
        default_factory=from_env(
            "TEXTEMBED_API_URL", default="http://localhost:8000/v1"
        )
    )
    """Endpoint URL to use."""

    api_key: SecretStr = Field(default_factory=secret_from_env("TEXTEMBED_API_KEY"))
    """API Key for authentication"""

    client: Any = None
    """TextEmbed client."""

    model_config = ConfigDict(
        extra="forbid",
    )

    @model_validator(mode="after")
    def validate_environment(self) -> Self:
        """Build the request client once URL and key are resolved."""
        self.client = AsyncOpenAITextEmbedEmbeddingClient(
            host=self.api_url, api_key=self.api_key.get_secret_value()
        )
        return self

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Synchronously embed a batch of texts.

        Args:
            texts (List[str]): The list of texts to embed.

        Returns:
            List[List[float]]: List of embeddings, one for each text.
        """
        return self.client.embed(model=self.model, texts=texts)

    async def aembed_documents(self, texts: List[str]) -> List[List[float]]:
        """Asynchronously embed a batch of texts.

        Args:
            texts (List[str]): The list of texts to embed.

        Returns:
            List[List[float]]: List of embeddings, one for each text.
        """
        return await self.client.aembed(model=self.model, texts=texts)

    def embed_query(self, text: str) -> List[float]:
        """Synchronously embed a single query string.

        Args:
            text (str): The text to embed.

        Returns:
            List[float]: Embeddings for the text.
        """
        return self.embed_documents([text])[0]

    async def aembed_query(self, text: str) -> List[float]:
        """Asynchronously embed a single query string.

        Args:
            text (str): The text to embed.

        Returns:
            List[float]: Embeddings for the text.
        """
        vectors = await self.aembed_documents([text])
        return vectors[0]
class AsyncOpenAITextEmbedEmbeddingClient:
    """
    A client to handle synchronous and asynchronous requests to the TextEmbed API.

    Attributes:
        host (str): The base URL for the TextEmbed API.
        api_key (str): The API key for authenticating with the TextEmbed API.
        aiosession (Optional[aiohttp.ClientSession]): The aiohttp session for async requests.
        _batch_size (int): Maximum batch size for a single request.
    """  # noqa: E501

    def __init__(
        self,
        host: str = "http://localhost:8000/v1",
        api_key: Union[str, None] = None,
        aiosession: Optional[aiohttp.ClientSession] = None,
    ) -> None:
        """Initialize the client.

        Args:
            host: Base URL of the TextEmbed server, including the ``/v1`` prefix.
            api_key: Bearer token sent with every request (may be ``None``).
            aiosession: Optional aiohttp session for async requests.

        Raises:
            ValueError: If ``host`` is unset or implausibly short.
        """
        self.host = host
        self.api_key = api_key
        self.aiosession = aiosession

        if self.host is None or len(self.host) < 3:
            raise ValueError("Parameter `host` must be set to a valid URL")
        # Upper bound on how many texts go into a single HTTP request.
        self._batch_size = 256

    @staticmethod
    def _permute(
        texts: List[str], sorter: Callable = len
    ) -> Tuple[List[str], Callable]:
        """
        Sorts texts in ascending order and provides a function to restore the original order.

        Args:
            texts (List[str]): List of texts to sort.
            sorter (Callable, optional): Sorting function, defaults to length.

        Returns:
            Tuple[List[str], Callable]: Sorted texts and a function to restore original order.
        """  # noqa: E501
        if len(texts) == 1:
            # Single text: nothing to reorder.
            return texts, lambda t: t
        # Sort longest-first so each batch contains similarly sized texts.
        length_sorted_idx = np.argsort([-sorter(sen) for sen in texts])
        texts_sorted = [texts[idx] for idx in length_sorted_idx]

        return texts_sorted, lambda unsorted_embeddings: [
            unsorted_embeddings[idx] for idx in np.argsort(length_sorted_idx)
        ]

    def _batch(self, texts: List[str]) -> List[List[str]]:
        """
        Splits a list of texts into batches of size max `self._batch_size`.

        Args:
            texts (List[str]): List of texts to split.

        Returns:
            List[List[str]]: List of batches of texts.
        """
        if len(texts) == 1:
            # Single text: one single-element batch.
            return [texts]
        batches = []
        for start_index in range(0, len(texts), self._batch_size):
            batches.append(texts[start_index : start_index + self._batch_size])
        return batches

    @staticmethod
    def _unbatch(batch_of_texts: List[List[Any]]) -> List[Any]:
        """
        Merges batches of texts into a single list.

        Args:
            batch_of_texts (List[List[Any]]): List of batches of texts.

        Returns:
            List[Any]: Merged list of texts.
        """
        if len(batch_of_texts) == 1 and len(batch_of_texts[0]) == 1:
            # Mirror of the single-text fast path in ``_batch``.
            return batch_of_texts[0]
        texts = []
        for sublist in batch_of_texts:
            texts.extend(sublist)
        return texts

    def _kwargs_post_request(self, model: str, texts: List[str]) -> Dict[str, Any]:
        """
        Builds the kwargs for the POST request, shared by the sync and async paths.

        Args:
            model (str): The model to use for embedding.
            texts (List[str]): List of texts to embed.

        Returns:
            Dict[str, Any]: Dictionary of POST request parameters.
        """
        return dict(
            url=f"{self.host}/embedding",
            headers={
                "accept": "application/json",
                "content-type": "application/json",
                "Authorization": f"Bearer {self.api_key}",
            },
            json=dict(
                input=texts,
                model=model,
            ),
        )

    def _sync_request_embed(
        self, model: str, batch_texts: List[str]
    ) -> List[List[float]]:
        """
        Sends a synchronous request to the embedding endpoint.

        Args:
            model (str): The model to use for embedding.
            batch_texts (List[str]): Batch of texts to embed.

        Returns:
            List[List[float]]: List of embeddings for the batch.

        Raises:
            Exception: If the response status is not 200.
        """
        response = requests.post(
            **self._kwargs_post_request(model=model, texts=batch_texts)
        )
        if response.status_code != 200:
            raise Exception(
                f"TextEmbed responded with an unexpected status message "
                f"{response.status_code}: {response.text}"
            )
        return [e["embedding"] for e in response.json()["data"]]

    def embed(self, model: str, texts: List[str]) -> List[List[float]]:
        """
        Embeds a list of texts synchronously.

        Args:
            model (str): The model to use for embedding.
            texts (List[str]): List of texts to embed.

        Returns:
            List[List[float]]: List of embeddings for the texts.
        """
        perm_texts, unpermute_func = self._permute(texts)
        perm_texts_batched = self._batch(perm_texts)

        # Request
        map_args = (
            self._sync_request_embed,
            [model] * len(perm_texts_batched),
            perm_texts_batched,
        )
        if len(perm_texts_batched) == 1:
            # Skip the thread-pool overhead for a single batch.
            embeddings_batch_perm = list(map(*map_args))
        else:
            with ThreadPoolExecutor(32) as p:
                embeddings_batch_perm = list(p.map(*map_args))

        embeddings_perm = self._unbatch(embeddings_batch_perm)
        embeddings = unpermute_func(embeddings_perm)
        return embeddings

    async def _async_request(
        self, session: aiohttp.ClientSession, **kwargs: Any
    ) -> List[List[float]]:
        """
        Sends an asynchronous request to the embedding endpoint.

        Args:
            session (aiohttp.ClientSession): The aiohttp session for the request.
            kwargs (Any): POST request parameters, forwarded to ``session.post``.

        Returns:
            List[List[float]]: List of embeddings for the request.

        Raises:
            Exception: If the response status is not 200.
        """
        async with session.post(**kwargs) as response:
            if response.status != 200:
                # BUG FIX: ``response.text`` is a coroutine *method* in aiohttp;
                # the previous f-string embedded its bound-method repr instead
                # of the actual body. It must be awaited.
                body = await response.text()
                raise Exception(
                    f"TextEmbed responded with an unexpected status message "
                    f"{response.status}: {body}"
                )
            embedding = (await response.json())["data"]
            return [e["embedding"] for e in embedding]

    async def aembed(self, model: str, texts: List[str]) -> List[List[float]]:
        """
        Embeds a list of texts asynchronously.

        Args:
            model (str): The model to use for embedding.
            texts (List[str]): List of texts to embed.

        Returns:
            List[List[float]]: List of embeddings for the texts.
        """
        perm_texts, unpermute_func = self._permute(texts)
        perm_texts_batched = self._batch(perm_texts)

        # One session per call; at most 32 concurrent connections.
        async with aiohttp.ClientSession(
            connector=aiohttp.TCPConnector(limit=32)
        ) as session:
            embeddings_batch_perm = await asyncio.gather(
                *[
                    self._async_request(
                        session=session,
                        **self._kwargs_post_request(model=model, texts=t),
                    )
                    for t in perm_texts_batched
                ]
            )

        embeddings_perm = self._unbatch(embeddings_batch_perm)
        embeddings = unpermute_func(embeddings_perm)
        return embeddings
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/embeddings/bookend.py | """Wrapper around Bookend AI embedding models."""
import json
from typing import Any, List
import requests
from langchain_core.embeddings import Embeddings
from pydantic import BaseModel, ConfigDict, Field
API_URL = "https://api.bookend.ai/"
DEFAULT_TASK = "embeddings"
PATH = "/models/predict"
class BookendEmbeddings(BaseModel, Embeddings):
    """Bookend AI sentence_transformers embedding models.

    Example:
        .. code-block:: python

            from langchain_community.embeddings import BookendEmbeddings

            bookend = BookendEmbeddings(
                domain={domain}
                api_token={api_token}
                model_id={model_id}
            )
            bookend.embed_documents([
                "Please put on these earmuffs because I can't you hear.",
                "Baby wipes are made of chocolate stardust.",
            ])
            bookend.embed_query(
                "She only paints with bold colors; she does not like pastels."
            )
    """

    domain: str
    """Request for a domain at https://bookend.ai/ to use this embeddings module."""
    api_token: str
    """Request for an API token at https://bookend.ai/ to use this embeddings module."""
    model_id: str
    """Embeddings model ID to use."""
    auth_header: dict = Field(default_factory=dict)
    model_config = ConfigDict(protected_namespaces=())

    def __init__(self, **kwargs: Any):
        super().__init__(**kwargs)
        # Pre-compute the auth header once from the configured token.
        self.auth_header = {"Authorization": "Basic {}".format(self.api_token)}

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Embed documents using a Bookend deployed embeddings model.

        Args:
            texts: The list of texts to embed.

        Returns:
            List of embeddings, one for each text.
        """
        result = []
        # BUG FIX: build a fresh headers dict instead of mutating
        # ``self.auth_header`` in place — the original code permanently added
        # "Content-Type" to the stored header as a side effect of this call.
        headers = {
            **self.auth_header,
            "Content-Type": "application/json; charset=utf-8",
        }
        params = {
            "model_id": self.model_id,
            "task": DEFAULT_TASK,
        }
        # The API embeds one text per request; issue the requests sequentially.
        for text in texts:
            data = json.dumps(
                {"text": text, "question": None, "context": None, "instruction": None}
            )
            r = requests.request(
                "POST",
                API_URL + self.domain + PATH,
                headers=headers,
                params=params,
                data=data,
            )
            result.append(r.json()[0]["data"])
        return result

    def embed_query(self, text: str) -> List[float]:
        """Embed a query using a Bookend deployed embeddings model.

        Args:
            text: The text to embed.

        Returns:
            Embeddings for the text.
        """
        return self.embed_documents([text])[0]
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/embeddings/infinity.py | """written under MIT Licence, Michael Feil 2023."""
import asyncio
from concurrent.futures import ThreadPoolExecutor
from typing import Any, Callable, Dict, List, Optional, Tuple
import aiohttp
import numpy as np
import requests
from langchain_core.embeddings import Embeddings
from langchain_core.utils import get_from_dict_or_env
from pydantic import BaseModel, ConfigDict, model_validator
__all__ = ["InfinityEmbeddings"]
class InfinityEmbeddings(BaseModel, Embeddings):
    """Self-hosted embedding models for `infinity` package.

    See https://github.com/michaelfeil/infinity
    This also works for text-embeddings-inference and other
    self-hosted openai-compatible servers.

    Infinity is a package to interact with Embedding Models on https://github.com/michaelfeil/infinity

    Example:
        .. code-block:: python

            from langchain_community.embeddings import InfinityEmbeddings
            InfinityEmbeddings(
                model="BAAI/bge-small",
                infinity_api_url="http://localhost:7997",
            )
    """

    model: str
    "Underlying Infinity model id."

    infinity_api_url: str = "http://localhost:7997"
    """Endpoint URL to use."""

    client: Any = None  #: :meta private:
    """Infinity client."""

    # LLM call kwargs
    model_config = ConfigDict(
        extra="forbid",
    )

    @model_validator(mode="before")
    @classmethod
    def validate_environment(cls, values: Dict) -> Any:
        """Validate that api key and python package exists in environment."""
        # mode="before": operates on the raw input dict, so the URL can be
        # pulled from the INFINITY_API_URL env var before field validation.
        values["infinity_api_url"] = get_from_dict_or_env(
            values, "infinity_api_url", "INFINITY_API_URL"
        )

        values["client"] = TinyAsyncOpenAIInfinityEmbeddingClient(
            host=values["infinity_api_url"],
        )
        return values

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Call out to Infinity's embedding endpoint.

        Args:
            texts: The list of texts to embed.

        Returns:
            List of embeddings, one for each text.
        """
        # Batching, permutation and concurrency live inside the client.
        embeddings = self.client.embed(
            model=self.model,
            texts=texts,
        )
        return embeddings

    async def aembed_documents(self, texts: List[str]) -> List[List[float]]:
        """Async call out to Infinity's embedding endpoint.

        Args:
            texts: The list of texts to embed.

        Returns:
            List of embeddings, one for each text.
        """
        embeddings = await self.client.aembed(
            model=self.model,
            texts=texts,
        )
        return embeddings

    def embed_query(self, text: str) -> List[float]:
        """Call out to Infinity's embedding endpoint.

        Args:
            text: The text to embed.

        Returns:
            Embeddings for the text.
        """
        # A query is embedded as a single-document batch.
        return self.embed_documents([text])[0]

    async def aembed_query(self, text: str) -> List[float]:
        """Async call out to Infinity's embedding endpoint.

        Args:
            text: The text to embed.

        Returns:
            Embeddings for the text.
        """
        embeddings = await self.aembed_documents([text])
        return embeddings[0]
class TinyAsyncOpenAIInfinityEmbeddingClient:  #: :meta private:
    """Helper tool to embed Infinity.

    It is not a part of Langchain's stable API,
    direct use discouraged.

    Example:
        .. code-block:: python

            mini_client = TinyAsyncInfinityEmbeddingClient(
            )
            embeds = mini_client.embed(
                model="BAAI/bge-small",
                text=["doc1", "doc2"]
            )
            # or
            embeds = await mini_client.aembed(
                model="BAAI/bge-small",
                text=["doc1", "doc2"]
            )
    """

    def __init__(
        self,
        host: str = "http://localhost:7797/v1",
        aiosession: Optional[aiohttp.ClientSession] = None,
    ) -> None:
        # NOTE(review): this default uses port 7797 while the server's
        # documented default (and InfinityEmbeddings' default) is 7997 —
        # confirm whether this is intentional. Left unchanged because callers
        # always pass an explicit host via InfinityEmbeddings.
        self.host = host
        self.aiosession = aiosession

        if self.host is None or len(self.host) < 3:
            raise ValueError(" param `host` must be set to a valid url")
        # Upper bound on how many texts are sent in one HTTP request.
        self._batch_size = 128

    @staticmethod
    def _permute(
        texts: List[str], sorter: Callable = len
    ) -> Tuple[List[str], Callable]:
        """Sort texts in ascending order, and
        delivers a lambda expr, which can sort a same length list
        https://github.com/UKPLab/sentence-transformers/blob/
        c5f93f70eca933c78695c5bc686ceda59651ae3b/sentence_transformers/SentenceTransformer.py#L156

        Args:
            texts (List[str]): _description_
            sorter (Callable, optional): _description_. Defaults to len.

        Returns:
            Tuple[List[str], Callable]: _description_

        Example:
            ```
            texts = ["one","three","four"]
            perm_texts, undo = self._permute(texts)
            texts == undo(perm_texts)
            ```
        """
        if len(texts) == 1:
            # special case query
            return texts, lambda t: t
        # Sort longest-first so each batch holds similarly sized texts.
        length_sorted_idx = np.argsort([-sorter(sen) for sen in texts])
        texts_sorted = [texts[idx] for idx in length_sorted_idx]

        return texts_sorted, lambda unsorted_embeddings: [  # E731
            unsorted_embeddings[idx] for idx in np.argsort(length_sorted_idx)
        ]

    def _batch(self, texts: List[str]) -> List[List[str]]:
        """
        splits Lists of text parts into batches of size max `self._batch_size`
        When encoding vector database,

        Args:
            texts (List[str]): List of sentences
            self._batch_size (int, optional): max batch size of one request.

        Returns:
            List[List[str]]: Batches of List of sentences
        """
        if len(texts) == 1:
            # special case query
            return [texts]
        batches = []
        for start_index in range(0, len(texts), self._batch_size):
            batches.append(texts[start_index : start_index + self._batch_size])
        return batches

    @staticmethod
    def _unbatch(batch_of_texts: List[List[Any]]) -> List[Any]:
        """Merge batches back into a single flat list (inverse of ``_batch``)."""
        if len(batch_of_texts) == 1 and len(batch_of_texts[0]) == 1:
            # special case query
            return batch_of_texts[0]
        texts = []
        for sublist in batch_of_texts:
            texts.extend(sublist)
        return texts

    def _kwargs_post_request(self, model: str, texts: List[str]) -> Dict[str, Any]:
        """Build the kwargs for the Post request, used by sync and async paths.

        Args:
            model (str): model id to embed with
            texts (List[str]): texts of this request

        Returns:
            Dict[str, Any]: keyword arguments for the POST call
        """
        return dict(
            url=f"{self.host}/embeddings",
            headers={
                # "accept": "application/json",
                "content-type": "application/json",
            },
            json=dict(
                input=texts,
                model=model,
            ),
        )

    def _sync_request_embed(
        self, model: str, batch_texts: List[str]
    ) -> List[List[float]]:
        """Embed one batch synchronously; raises on any non-200 response."""
        response = requests.post(
            **self._kwargs_post_request(model=model, texts=batch_texts)
        )
        if response.status_code != 200:
            raise Exception(
                f"Infinity returned an unexpected response with status "
                f"{response.status_code}: {response.text}"
            )
        return [e["embedding"] for e in response.json()["data"]]

    def embed(self, model: str, texts: List[str]) -> List[List[float]]:
        """call the embedding of model

        Args:
            model (str): to embedding model
            texts (List[str]): List of sentences to embed.

        Returns:
            List[List[float]]: List of vectors for each sentence
        """
        perm_texts, unpermute_func = self._permute(texts)
        perm_texts_batched = self._batch(perm_texts)

        # Request
        map_args = (
            self._sync_request_embed,
            [model] * len(perm_texts_batched),
            perm_texts_batched,
        )
        if len(perm_texts_batched) == 1:
            # Skip thread-pool overhead for a single batch.
            embeddings_batch_perm = list(map(*map_args))
        else:
            with ThreadPoolExecutor(32) as p:
                embeddings_batch_perm = list(p.map(*map_args))

        embeddings_perm = self._unbatch(embeddings_batch_perm)
        embeddings = unpermute_func(embeddings_perm)
        return embeddings

    async def _async_request(
        self, session: aiohttp.ClientSession, kwargs: Dict[str, Any]
    ) -> List[List[float]]:
        """Embed one batch asynchronously; raises on any non-200 response."""
        async with session.post(**kwargs) as response:
            if response.status != 200:
                # BUG FIX: ``response.text`` is a coroutine *method* in
                # aiohttp; the previous f-string embedded its bound-method
                # repr instead of the actual body. It must be awaited.
                body = await response.text()
                raise Exception(
                    f"Infinity returned an unexpected response with status "
                    f"{response.status}: {body}"
                )
            embedding = (await response.json())["data"]
            return [e["embedding"] for e in embedding]

    async def aembed(self, model: str, texts: List[str]) -> List[List[float]]:
        """call the embedding of model, async method

        Args:
            model (str): to embedding model
            texts (List[str]): List of sentences to embed.

        Returns:
            List[List[float]]: List of vectors for each sentence
        """
        perm_texts, unpermute_func = self._permute(texts)
        perm_texts_batched = self._batch(perm_texts)

        # Request
        # One session per call; at most 32 concurrent connections.
        async with aiohttp.ClientSession(
            trust_env=True, connector=aiohttp.TCPConnector(limit=32)
        ) as session:
            embeddings_batch_perm = await asyncio.gather(
                *[
                    self._async_request(
                        session=session,
                        kwargs=self._kwargs_post_request(model=model, texts=t),
                    )
                    for t in perm_texts_batched
                ]
            )

        embeddings_perm = self._unbatch(embeddings_batch_perm)
        embeddings = unpermute_func(embeddings_perm)
        return embeddings
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/embeddings/optimum_intel.py | from typing import Any, Dict, List, Optional
from langchain_core.embeddings import Embeddings
from pydantic import BaseModel, ConfigDict
class QuantizedBiEncoderEmbeddings(BaseModel, Embeddings):
    """Quantized bi-encoders embedding models.

    Please ensure that you have installed optimum-intel and ipex.

    Input:
        model_name: str = Model name.
        max_seq_len: int = The maximum sequence length for tokenization. (default 512)
        pooling_strategy: str =
            "mean" or "cls", pooling strategy for the final layer. (default "mean")
        query_instruction: Optional[str] =
            An instruction to add to the query before embedding. (default None)
        document_instruction: Optional[str] =
            An instruction to add to each document before embedding. (default None)
        padding: Optional[bool] =
            Whether to add padding during tokenization or not. (default True)
        model_kwargs: Optional[Dict] =
            Parameters to add to the model during initialization. (default {})
        encode_kwargs: Optional[Dict] =
            Parameters to add during the embedding forward pass. (default {})

    Example:
        from langchain_community.embeddings import QuantizedBiEncoderEmbeddings

        model_name = "Intel/bge-small-en-v1.5-rag-int8-static"
        encode_kwargs = {'normalize_embeddings': True}
        hf = QuantizedBiEncoderEmbeddings(
            model_name,
            encode_kwargs=encode_kwargs,
            query_instruction="Represent this sentence for searching relevant passages: "
        )
    """

    def __init__(
        self,
        model_name: str,
        max_seq_len: int = 512,
        pooling_strategy: str = "mean",  # "mean" or "cls"
        query_instruction: Optional[str] = None,
        document_instruction: Optional[str] = None,
        padding: bool = True,
        model_kwargs: Optional[Dict] = None,
        encode_kwargs: Optional[Dict] = None,
        **kwargs: Any,
    ) -> None:
        super().__init__(**kwargs)
        self.model_name_or_path = model_name
        self.max_seq_len = max_seq_len
        self.pooling = pooling_strategy
        self.padding = padding
        # Avoid mutable-default pitfalls: fresh dicts when not provided.
        self.encode_kwargs = encode_kwargs or {}
        self.model_kwargs = model_kwargs or {}

        self.normalize = self.encode_kwargs.get("normalize_embeddings", False)
        self.batch_size = self.encode_kwargs.get("batch_size", 32)

        self.query_instruction = query_instruction
        self.document_instruction = document_instruction

        # Eagerly load tokenizer and quantized model; fails fast on missing deps.
        self.load_model()

    def load_model(self) -> None:
        """Load the tokenizer and the IPEX-quantized transformer model.

        Raises:
            ImportError: If ``transformers`` is not installed.
            Exception: If the optimum-intel/ipex model cannot be loaded.
        """
        try:
            from transformers import AutoTokenizer
        except ImportError as e:
            raise ImportError(
                "Unable to import transformers, please install with "
                "`pip install -U transformers`."
            ) from e
        try:
            from optimum.intel import IPEXModel

            self.transformer_model = IPEXModel.from_pretrained(
                self.model_name_or_path, **self.model_kwargs
            )
        except Exception as e:
            # Chain the original error explicitly so the root cause is kept.
            raise Exception(
                f"""
Failed to load model {self.model_name_or_path}, due to the following error:
{e}
Please ensure that you have installed optimum-intel and ipex correctly,using:

pip install optimum[neural-compressor]
pip install intel_extension_for_pytorch

For more information, please visit:
* Install optimum-intel as shown here: https://github.com/huggingface/optimum-intel.
* Install IPEX as shown here: https://intel.github.io/intel-extension-for-pytorch/index.html#installation?platform=cpu&version=v2.2.0%2Bcpu.
"""
            ) from e
        self.transformer_tokenizer = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path=self.model_name_or_path,
        )
        # Inference only; disables dropout etc.
        self.transformer_model.eval()

    model_config = ConfigDict(
        extra="allow",
        protected_namespaces=(),
    )

    def _embed(self, inputs: Any) -> Any:
        """Run the model on tokenized inputs and pool to sentence embeddings."""
        try:
            import torch
        except ImportError as e:
            raise ImportError(
                "Unable to import torch, please install with `pip install -U torch`."
            ) from e
        with torch.inference_mode():
            outputs = self.transformer_model(**inputs)
            if self.pooling == "mean":
                emb = self._mean_pooling(outputs, inputs["attention_mask"])
            elif self.pooling == "cls":
                emb = self._cls_pooling(outputs)
            else:
                # BUG FIX: previous message read "pooling method no supported".
                raise ValueError("pooling method not supported")

            if self.normalize:
                emb = torch.nn.functional.normalize(emb, p=2, dim=1)
        return emb

    @staticmethod
    def _cls_pooling(outputs: Any) -> Any:
        """Return the [CLS] (first) token embedding for each sequence."""
        if isinstance(outputs, dict):
            token_embeddings = outputs["last_hidden_state"]
        else:
            token_embeddings = outputs[0]
        return token_embeddings[:, 0]

    @staticmethod
    def _mean_pooling(outputs: Any, attention_mask: Any) -> Any:
        """Average token embeddings, ignoring padding via the attention mask."""
        try:
            import torch
        except ImportError as e:
            raise ImportError(
                "Unable to import torch, please install with `pip install -U torch`."
            ) from e
        if isinstance(outputs, dict):
            token_embeddings = outputs["last_hidden_state"]
        else:
            # First element of model_output contains all token embeddings
            token_embeddings = outputs[0]
        input_mask_expanded = (
            attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
        )
        sum_embeddings = torch.sum(token_embeddings * input_mask_expanded, 1)
        # Clamp avoids division by zero for fully-masked (empty) sequences.
        sum_mask = torch.clamp(input_mask_expanded.sum(1), min=1e-9)
        return sum_embeddings / sum_mask

    def _embed_text(self, texts: List[str]) -> List[List[float]]:
        """Tokenize, embed and convert a batch of texts to plain lists."""
        inputs = self.transformer_tokenizer(
            texts,
            max_length=self.max_seq_len,
            truncation=True,
            padding=self.padding,
            return_tensors="pt",
        )
        return self._embed(inputs).tolist()

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Embed a list of text documents using the Optimized Embedder model.

        Input:
            texts: List[str] = List of text documents to embed.
        Output:
            List[List[float]] = The embeddings of each text document.
        """
        try:
            import pandas as pd
        except ImportError as e:
            raise ImportError(
                "Unable to import pandas, please install with `pip install -U pandas`."
            ) from e
        try:
            from tqdm import tqdm
        except ImportError as e:
            raise ImportError(
                "Unable to import tqdm, please install with `pip install -U tqdm`."
            ) from e
        docs = [
            self.document_instruction + d if self.document_instruction else d
            for d in texts
        ]

        # group into batches
        text_list_df = pd.DataFrame(docs, columns=["texts"]).reset_index()

        # assign each example with its batch
        text_list_df["batch_index"] = text_list_df["index"] // self.batch_size

        # create groups
        batches = list(text_list_df.groupby(["batch_index"])["texts"].apply(list))

        vectors = []
        for batch in tqdm(batches, desc="Batches"):
            vectors += self._embed_text(batch)
        return vectors

    def embed_query(self, text: str) -> List[float]:
        """Embed a single query, prepending ``query_instruction`` if configured."""
        if self.query_instruction:
            text = self.query_instruction + text
        return self._embed_text([text])[0]
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/embeddings/solar.py | from __future__ import annotations
import logging
from typing import Any, Callable, Dict, List, Optional
import requests
from langchain_core._api import deprecated
from langchain_core.embeddings import Embeddings
from langchain_core.utils import convert_to_secret_str, get_from_dict_or_env, pre_init
from pydantic import BaseModel, ConfigDict, SecretStr
from tenacity import (
before_sleep_log,
retry,
stop_after_attempt,
wait_exponential,
)
# Module-level logger, used by the retry decorator below to report back-offs.
logger = logging.getLogger(__name__)
def _create_retry_decorator() -> Callable[[Any], Any]:
    """Build the tenacity retry decorator for Solar embedding calls.

    Retries up to 6 times with exponential back-off bounded to 1-4 seconds,
    logs a warning before each sleep, and re-raises the final error.
    """
    return retry(
        reraise=True,
        stop=stop_after_attempt(6),
        wait=wait_exponential(multiplier=1, min=1, max=4),
        before_sleep=before_sleep_log(logger, logging.WARNING),
    )
def embed_with_retry(embeddings: SolarEmbeddings, *args: Any, **kwargs: Any) -> Any:
    """Use tenacity to retry the completion call."""

    # Apply the shared retry policy directly to a closure over ``embeddings``.
    @_create_retry_decorator()
    def _embed_with_retry(*inner_args: Any, **inner_kwargs: Any) -> Any:
        return embeddings.embed(*inner_args, **inner_kwargs)

    return _embed_with_retry(*args, **kwargs)
@deprecated(
    since="0.0.34",
    removal="1.0",
    # BUG FIX: previously pointed at ChatUpstage (a chat model); the correct
    # replacement for this *embeddings* class is UpstageEmbeddings.
    alternative_import="langchain_upstage.UpstageEmbeddings",
)
class SolarEmbeddings(BaseModel, Embeddings):
    """Solar's embedding service.

    To use, you should have the environment variable``SOLAR_API_KEY`` set
    with your API token, or pass it as a named parameter to the constructor.

    Example:
        .. code-block:: python

            from langchain_community.embeddings import SolarEmbeddings
            embeddings = SolarEmbeddings()

            query_text = "This is a test query."
            query_result = embeddings.embed_query(query_text)

            document_text = "This is a test document."
            document_result = embeddings.embed_documents([document_text])
    """

    endpoint_url: str = "https://api.upstage.ai/v1/solar/embeddings"
    """Endpoint URL to use."""
    model: str = "solar-1-mini-embedding-query"
    """Embeddings model name to use."""
    solar_api_key: Optional[SecretStr] = None
    """API Key for Solar API."""

    model_config = ConfigDict(
        extra="forbid",
    )

    @pre_init
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate api key exists in environment."""
        solar_api_key = convert_to_secret_str(
            get_from_dict_or_env(values, "solar_api_key", "SOLAR_API_KEY")
        )
        values["solar_api_key"] = solar_api_key
        return values

    def embed(
        self,
        text: str,
    ) -> List[float]:
        """Embed a single text via one POST request to the Solar endpoint.

        Note: the return annotation was corrected from ``List[List[float]]``
        to ``List[float]`` — this method returns one embedding vector
        (``data[0]["embedding"]``), not a batch.

        Args:
            text: The text to embed.

        Returns:
            The embedding vector for ``text``.

        Raises:
            ValueError: If the API response contains no data.
        """
        payload = {
            "model": self.model,
            "input": text,
        }

        # HTTP headers for authorization
        headers = {
            "Authorization": f"Bearer {self.solar_api_key.get_secret_value()}",  # type: ignore[union-attr]
            "Content-Type": "application/json",
        }

        # send request
        response = requests.post(self.endpoint_url, headers=headers, json=payload)
        parsed_response = response.json()

        # check for errors
        if len(parsed_response["data"]) == 0:
            raise ValueError(
                f"Solar API returned an error: {parsed_response['base_resp']}"
            )

        embedding = parsed_response["data"][0]["embedding"]

        return embedding

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Embed documents using a Solar embedding endpoint.

        Args:
            texts: The list of texts to embed.

        Returns:
            List of embeddings, one for each text.
        """
        # One request per text, each wrapped in the shared retry policy.
        embeddings = [embed_with_retry(self, text=text) for text in texts]
        return embeddings

    def embed_query(self, text: str) -> List[float]:
        """Embed a query using a Solar embedding endpoint.

        Args:
            text: The text to embed.

        Returns:
            Embeddings for the text.
        """
        embedding = embed_with_retry(self, text=text)
        return embedding
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/embeddings/ovhcloud.py | import json
import logging
import time
from typing import Any, List
import requests
from langchain_core.embeddings import Embeddings
from pydantic import BaseModel, ConfigDict
# Module-level logger, used below to report rate-limit back-offs.
logger = logging.getLogger(__name__)
class OVHCloudEmbeddings(BaseModel, Embeddings):
    """
    OVHcloud AI Endpoints Embeddings.
    """

    # NOTE: the original used bare triple-quoted strings *above* each field as
    # pseudo-comments; they were no-op expression statements and are now real
    # comments.
    # OVHcloud AI Endpoints access token.
    access_token: str = ""
    # OVHcloud AI Endpoints model name for embeddings generation.
    model_name: str = ""
    # OVHcloud AI Endpoints region.
    region: str = "kepler"

    model_config = ConfigDict(extra="forbid", protected_namespaces=())

    def __init__(self, **kwargs: Any):
        super().__init__(**kwargs)
        # Fail fast on missing configuration rather than at request time.
        if self.access_token == "":
            raise ValueError("Access token is required for OVHCloud embeddings.")
        if self.model_name == "":
            raise ValueError("Model name is required for OVHCloud embeddings.")
        if self.region == "":
            raise ValueError("Region is required for OVHCloud embeddings.")

    def _generate_embedding(self, text: str) -> List[float]:
        """Generate embeddings from OVHCLOUD AIE.

        Args:
            text (str): The text to embed.

        Returns:
            List[float]: Embeddings for the text.
        """
        return self._send_request_to_ai_endpoints("text/plain", text, "text2vec")

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Embed a list of documents.

        Args:
            texts (List[str]): The list of texts to embed.

        Returns:
            List[List[float]]: List of embeddings, one for each input text.
        """
        # The batch route accepts a JSON array of strings in one request.
        return self._send_request_to_ai_endpoints(
            "application/json", json.dumps(texts), "batch_text2vec"
        )

    def embed_query(self, text: str) -> List[float]:
        """Embed a single query text.

        Args:
            text (str): The text to embed.

        Returns:
            List[float]: Embeddings for the text.
        """
        return self._generate_embedding(text)

    def _send_request_to_ai_endpoints(
        self, contentType: str, payload: str, route: str
    ) -> Any:
        """Send a HTTPS request to OVHcloud AI Endpoints

        Args:
            contentType (str): The content type of the request, application/json or text/plain.
            payload (str): The payload of the request.
            route (str): The route of the request, batch_text2vec or text2vec.

        Returns:
            Any: Parsed JSON body of the successful response.

        Raises:
            ValueError: On a 401 response or any other non-retryable status.

        NOTE(review): a persistently rate-limiting server makes this loop
        retry forever; confirm whether a retry cap is desirable.
        """  # noqa: E501
        headers = {
            "content-type": contentType,
            "Authorization": f"Bearer {self.access_token}",
        }

        session = requests.session()
        while True:
            response = session.post(
                (
                    f"https://{self.model_name}.endpoints.{self.region}"
                    f".ai.cloud.ovh.net/api/{route}"
                ),
                headers=headers,
                data=payload,
            )
            if response.status_code != 200:
                if response.status_code == 429:
                    # Rate limit exceeded: wait for the advertised reset
                    # window, then retry. A missing/zero header means the
                    # window has already passed, so retry immediately.
                    reset_time = int(response.headers.get("RateLimit-Reset", 0))
                    logger.info("Rate limit exceeded. Waiting %d seconds.", reset_time)
                    if reset_time > 0:
                        time.sleep(reset_time)
                        continue
                    else:
                        continue
                if response.status_code == 401:
                    # Unauthorized: retrying with the same token cannot help.
                    raise ValueError("Unauthorized, retry with new token")
                # Any other non-200 status is a hard failure.
                raise ValueError(
                    "Request failed with status code: {status_code}, {text}".format(
                        status_code=response.status_code, text=response.text
                    )
                )
            return response.json()
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/retrievers/rememberizer.py | from typing import List
from langchain_core.callbacks import CallbackManagerForRetrieverRun
from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever
from langchain_community.utilities.rememberizer import RememberizerAPIWrapper
class RememberizerRetriever(BaseRetriever, RememberizerAPIWrapper):
    """`Rememberizer` retriever.

    It wraps load() to get_relevant_documents().
    It uses all RememberizerAPIWrapper arguments without any change.
    """

    def _get_relevant_documents(
        self, query: str, *, run_manager: CallbackManagerForRetrieverRun
    ) -> List[Document]:
        # Delegate directly to the inherited API wrapper; ``run_manager`` is
        # accepted to satisfy the BaseRetriever contract but is not used here.
        return self.load(query=query)
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/retrievers/zilliz.py | import warnings
from typing import Any, Dict, List, Optional
from langchain_core.callbacks import CallbackManagerForRetrieverRun
from langchain_core.documents import Document
from langchain_core.embeddings import Embeddings
from langchain_core.retrievers import BaseRetriever
from pydantic import model_validator
from langchain_community.vectorstores.zilliz import Zilliz
# TODO: Update to ZillizClient + Hybrid Search when available
class ZillizRetriever(BaseRetriever):
    """`Zilliz API` retriever."""

    embedding_function: Embeddings
    """The underlying embedding function from which documents will be retrieved."""
    collection_name: str = "LangChainCollection"
    """The name of the collection in Zilliz."""
    connection_args: Optional[Dict[str, Any]] = None
    """The connection arguments for the Zilliz client."""
    consistency_level: str = "Session"
    """The consistency level for the Zilliz client."""
    search_params: Optional[dict] = None
    """The search parameters for the Zilliz client."""
    store: Zilliz
    """The underlying Zilliz store."""
    retriever: BaseRetriever
    """The underlying retriever."""

    @model_validator(mode="before")
    @classmethod
    def create_client(cls, values: dict) -> Any:
        # Eagerly build the vector store and its retriever from the raw input
        # values; both then populate the required ``store``/``retriever``
        # fields during normal pydantic validation.
        values["store"] = Zilliz(
            values["embedding_function"],
            values["collection_name"],
            values["connection_args"],
            values["consistency_level"],
        )
        values["retriever"] = values["store"].as_retriever(
            search_kwargs={"param": values["search_params"]}
        )
        return values

    def add_texts(
        self, texts: List[str], metadatas: Optional[List[dict]] = None
    ) -> None:
        """Add text to the Zilliz store

        Args:
            texts (List[str]): The text
            metadatas (List[dict]): Metadata dicts, must line up with existing store
        """
        self.store.add_texts(texts, metadatas)

    def _get_relevant_documents(
        self,
        query: str,
        *,
        run_manager: CallbackManagerForRetrieverRun,
        **kwargs: Any,
    ) -> List[Document]:
        # NOTE(review): ``run_manager`` is forwarded to ``invoke`` as a plain
        # keyword argument; ``invoke`` normally expects a ``config`` — verify
        # the inner retriever actually consumes this kwarg as intended.
        return self.retriever.invoke(
            query, run_manager=run_manager.get_child(), **kwargs
        )
def ZillizRetreiver(*args: Any, **kwargs: Any) -> ZillizRetriever:
    """Deprecated misspelled alias for :class:`ZillizRetriever`.
    Please use ZillizRetriever ('i' before 'e') instead.
    Args:
        *args:
        **kwargs:
    Returns:
        ZillizRetriever
    """
    # Emit the deprecation warning, then forward everything unchanged.
    message = (
        "ZillizRetreiver will be deprecated in the future. "
        "Please use ZillizRetriever ('i' before 'e') instead."
    )
    warnings.warn(message, DeprecationWarning)
    return ZillizRetriever(*args, **kwargs)
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/retrievers/cohere_rag_retriever.py | from __future__ import annotations
from typing import TYPE_CHECKING, Any, Dict, List
from langchain_core._api.deprecation import deprecated
from langchain_core.callbacks import (
AsyncCallbackManagerForRetrieverRun,
CallbackManagerForRetrieverRun,
)
from langchain_core.documents import Document
from langchain_core.language_models.chat_models import BaseChatModel
from langchain_core.messages import HumanMessage
from langchain_core.retrievers import BaseRetriever
from pydantic import ConfigDict, Field
if TYPE_CHECKING:
from langchain_core.messages import BaseMessage
def _get_docs(response: Any) -> List[Document]:
    """Convert a Cohere RAG chat generation into a list of Documents.

    Each connector-retrieved document becomes one Document whose metadata is
    the raw result dict; the model's own reply is appended last as a
    Document tagged with ``type: "model_response"``.
    """
    generation_info = response.generation_info
    docs: List[Document] = []
    if "documents" in generation_info:
        for doc in generation_info["documents"]:
            docs.append(Document(page_content=doc["snippet"], metadata=doc))
    docs.append(
        Document(
            page_content=response.message.content,
            metadata={
                "type": "model_response",
                "citations": generation_info["citations"],
                "search_results": generation_info["search_results"],
                "search_queries": generation_info["search_queries"],
                "token_count": generation_info["token_count"],
            },
        )
    )
    return docs
@deprecated(
    since="0.0.30",
    removal="1.0",
    alternative_import="langchain_cohere.CohereRagRetriever",
)
class CohereRagRetriever(BaseRetriever):
    """Cohere Chat API with RAG."""
    connectors: List[Dict] = Field(default_factory=lambda: [{"id": "web-search"}])
    """
    When specified, the model's reply will be enriched with information found by
    querying each of the connectors (RAG). These will be returned as langchain
    documents.
    Currently only accepts {"id": "web-search"}.
    """
    llm: BaseChatModel
    """Cohere ChatModel to use."""
    model_config = ConfigDict(
        arbitrary_types_allowed=True,
    )
    def _get_relevant_documents(
        self, query: str, *, run_manager: CallbackManagerForRetrieverRun, **kwargs: Any
    ) -> List[Document]:
        """Run a single-turn RAG chat and return connector docs plus the reply."""
        messages: List[List[BaseMessage]] = [[HumanMessage(content=query)]]
        # Only the first generation of the first prompt is used; extra kwargs
        # are forwarded to the underlying Cohere chat call.
        res = self.llm.generate(
            messages,
            connectors=self.connectors,
            callbacks=run_manager.get_child(),
            **kwargs,
        ).generations[0][0]
        return _get_docs(res)
    async def _aget_relevant_documents(
        self,
        query: str,
        *,
        run_manager: AsyncCallbackManagerForRetrieverRun,
        **kwargs: Any,
    ) -> List[Document]:
        """Async counterpart of ``_get_relevant_documents``."""
        messages: List[List[BaseMessage]] = [[HumanMessage(content=query)]]
        res = (
            await self.llm.agenerate(
                messages,
                connectors=self.connectors,
                callbacks=run_manager.get_child(),
                **kwargs,
            )
        ).generations[0][0]
        return _get_docs(res)
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/retrievers/embedchain.py | """Wrapper around Embedchain Retriever."""
from __future__ import annotations
from typing import Any, Iterable, List, Optional
from langchain_core.callbacks import CallbackManagerForRetrieverRun
from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever
class EmbedchainRetriever(BaseRetriever):
    """`Embedchain` retriever."""
    client: Any
    """Embedchain Pipeline."""
    @classmethod
    def create(cls, yaml_path: Optional[str] = None) -> EmbedchainRetriever:
        """
        Create a EmbedchainRetriever from a YAML configuration file.
        Args:
            yaml_path: Path to the YAML configuration file. If not provided,
                       a default configuration is used.
        Returns:
            An instance of EmbedchainRetriever.
        """
        from embedchain import Pipeline
        # Fall back to embedchain's default pipeline when no YAML is given.
        pipeline = (
            Pipeline.from_config(yaml_path=yaml_path) if yaml_path else Pipeline()
        )
        return cls(client=pipeline)
    def add_texts(
        self,
        texts: Iterable[str],
    ) -> List[str]:
        """Run more texts through the embeddings and add to the retriever.
        Args:
            texts: Iterable of strings/URLs to add to the retriever.
        Returns:
            List of ids from adding the texts into the retriever.
        """
        return [self.client.add(text) for text in texts]
    def _get_relevant_documents(
        self, query: str, *, run_manager: CallbackManagerForRetrieverRun
    ) -> List[Document]:
        """Search the pipeline and convert each hit into a Document."""
        return [
            Document(
                page_content=hit["context"],
                metadata={
                    "source": hit["metadata"]["url"],
                    "document_id": hit["metadata"]["doc_id"],
                },
            )
            for hit in self.client.search(query)
        ]
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/retrievers/vespa_retriever.py | from __future__ import annotations
import json
from typing import Any, Dict, List, Literal, Optional, Sequence, Union
from langchain_core.callbacks import CallbackManagerForRetrieverRun
from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever
class VespaRetriever(BaseRetriever):
    """`Vespa` retriever."""
    app: Any
    """Vespa application to query."""
    body: Dict
    """Body of the query."""
    content_field: str
    """Name of the content field."""
    metadata_fields: Sequence[str]
    """Names of the metadata fields."""
    def _query(self, body: Dict) -> List[Document]:
        """Execute a raw Vespa query body and convert hits to Documents.

        Raises:
            RuntimeError: If the HTTP status is not 2xx, or if Vespa reports
                errors in the response root.
        """
        response = self.app.query(body)
        if not str(response.status_code).startswith("2"):
            raise RuntimeError(
                "Could not retrieve data from Vespa. Error code: {}".format(
                    response.status_code
                )
            )
        root = response.json["root"]
        if "errors" in root:
            raise RuntimeError(json.dumps(root["errors"]))
        docs = []
        for child in response.hits:
            # The content field becomes page_content; the remaining requested
            # fields (plus the hit id) become metadata.
            page_content = child["fields"].pop(self.content_field, "")
            if self.metadata_fields == "*":
                metadata = child["fields"]
            else:
                metadata = {mf: child["fields"].get(mf) for mf in self.metadata_fields}
            metadata["id"] = child["id"]
            docs.append(Document(page_content=page_content, metadata=metadata))
        return docs
    def _get_relevant_documents(
        self, query: str, *, run_manager: CallbackManagerForRetrieverRun
    ) -> List[Document]:
        """Run the stored query body with the user query substituted in."""
        body = self.body.copy()
        body["query"] = query
        return self._query(body)
    def get_relevant_documents_with_filter(
        self, query: str, *, _filter: Optional[str] = None
    ) -> List[Document]:
        """Like ``invoke``, but ANDs an extra YQL filter onto the stored query."""
        body = self.body.copy()
        _filter = f" and {_filter}" if _filter else ""
        body["yql"] = body["yql"] + _filter
        body["query"] = query
        return self._query(body)
    @classmethod
    def from_params(
        cls,
        url: str,
        content_field: str,
        *,
        k: Optional[int] = None,
        metadata_fields: Union[Sequence[str], Literal["*"]] = (),
        sources: Union[Sequence[str], Literal["*"], None] = None,
        _filter: Optional[str] = None,
        yql: Optional[str] = None,
        **kwargs: Any,
    ) -> VespaRetriever:
        """Instantiate retriever from params.
        Args:
            url (str): Vespa app URL.
            content_field (str): Field in results to return as Document page_content.
            k (Optional[int]): Number of Documents to return. Defaults to None.
            metadata_fields(Sequence[str] or "*"): Fields in results to include in
                document metadata. Defaults to empty tuple ().
            sources (Sequence[str] or "*" or None): Sources to retrieve
                from. Defaults to None.
            _filter (Optional[str]): Document filter condition expressed in YQL.
                Defaults to None.
            yql (Optional[str]): Full YQL query to be used. Should not be specified
                if _filter or sources are specified. Defaults to None.
            kwargs (Any): Keyword arguments added to query body.
        Returns:
            VespaRetriever: Instantiated VespaRetriever.
        """
        try:
            from vespa.application import Vespa
        except ImportError:
            raise ImportError(
                "pyvespa is not installed, please install with `pip install pyvespa`"
            )
        app = Vespa(url)
        body = kwargs.copy()
        if yql and (sources or _filter):
            raise ValueError(
                "yql should only be specified if both sources and _filter are not "
                "specified."
            )
        # Requesting all metadata fields needs the "short" summary class
        # regardless of whether the YQL was supplied or is built below.
        if metadata_fields == "*":
            body["summary"] = "short"
        if not yql:
            # Build a default YQL query. Previously a caller-supplied ``yql``
            # was unconditionally overwritten here; only construct one when
            # none was provided.
            if metadata_fields == "*":
                _fields = "*"
            else:
                _fields = ", ".join([content_field] + list(metadata_fields or []))
            _sources = ", ".join(sources) if isinstance(sources, Sequence) else "*"
            _filter = f" and {_filter}" if _filter else ""
            yql = f"select {_fields} from sources {_sources} where userQuery(){_filter}"
        body["yql"] = yql
        if k:
            body["hits"] = k
        return cls(
            app=app,
            body=body,
            content_field=content_field,
            metadata_fields=metadata_fields,
        )
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/retrievers/azure_ai_search.py | from __future__ import annotations
import json
from typing import Any, Dict, List, Optional
import aiohttp
import requests
from langchain_core.callbacks import (
AsyncCallbackManagerForRetrieverRun,
CallbackManagerForRetrieverRun,
)
from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever
from langchain_core.utils import get_from_dict_or_env, get_from_env
from pydantic import ConfigDict, model_validator
DEFAULT_URL_SUFFIX = "search.windows.net"
"""Default URL Suffix for endpoint connection - commercial cloud"""
class AzureAISearchRetriever(BaseRetriever):
    """`Azure AI Search` service retriever.
    Setup:
        See here for more detail: https://python.langchain.com/docs/integrations/retrievers/azure_ai_search/
        We will need to install the below dependencies and set the required
        environment variables:
        .. code-block:: bash
            pip install -U langchain-community azure-identity azure-search-documents
            export AZURE_AI_SEARCH_SERVICE_NAME="<YOUR_SEARCH_SERVICE_NAME>"
            export AZURE_AI_SEARCH_INDEX_NAME="<YOUR_SEARCH_INDEX_NAME>"
            export AZURE_AI_SEARCH_API_KEY="<YOUR_API_KEY>"
    Key init args:
        content_key: str
        top_k: int
        index_name: str
    Instantiate:
        .. code-block:: python
            from langchain_community.retrievers import AzureAISearchRetriever
            retriever = AzureAISearchRetriever(
                content_key="content", top_k=1, index_name="langchain-vector-demo"
            )
    Usage:
        .. code-block:: python
            retriever.invoke("here is my unstructured query string")
    Use within a chain:
        .. code-block:: python
            from langchain_core.output_parsers import StrOutputParser
            from langchain_core.prompts import ChatPromptTemplate
            from langchain_core.runnables import RunnablePassthrough
            from langchain_openai import AzureChatOpenAI
            prompt = ChatPromptTemplate.from_template(
                \"\"\"Answer the question based only on the context provided.
            Context: {context}
            Question: {question}\"\"\"
            )
            llm = AzureChatOpenAI(azure_deployment="gpt-35-turbo")
            def format_docs(docs):
                return "\\n\\n".join(doc.page_content for doc in docs)
            chain = (
                {"context": retriever | format_docs, "question": RunnablePassthrough()}
                | prompt
                | llm
                | StrOutputParser()
            )
            chain.invoke("...")
    """  # noqa: E501
    service_name: str = ""
    """Name of Azure AI Search service"""
    index_name: str = ""
    """Name of Index inside Azure AI Search service"""
    api_key: str = ""
    """API Key. Both Admin and Query keys work, but for reading data it's
    recommended to use a Query key."""
    api_version: str = "2023-11-01"
    """API version"""
    aiosession: Optional[aiohttp.ClientSession] = None
    """ClientSession, in case we want to reuse connection for better performance."""
    content_key: str = "content"
    """Key in a retrieved result to set as the Document page_content."""
    top_k: Optional[int] = None
    """Number of results to retrieve. Set to None to retrieve all results."""
    filter: Optional[str] = None
    """OData $filter expression to apply to the search query."""
    model_config = ConfigDict(
        arbitrary_types_allowed=True,
        extra="forbid",
    )
    @model_validator(mode="before")
    @classmethod
    def validate_environment(cls, values: Dict) -> Any:
        """Validate that service name, index name and api key exists in environment."""
        values["service_name"] = get_from_dict_or_env(
            values, "service_name", "AZURE_AI_SEARCH_SERVICE_NAME"
        )
        values["index_name"] = get_from_dict_or_env(
            values, "index_name", "AZURE_AI_SEARCH_INDEX_NAME"
        )
        values["api_key"] = get_from_dict_or_env(
            values, "api_key", "AZURE_AI_SEARCH_API_KEY"
        )
        return values
    def _build_search_url(self, query: str) -> str:
        """Construct the full REST search URL for ``query``.

        Normalizes ``service_name`` whether it was given as a bare service
        name or a full ``https://`` endpoint, with or without the URL suffix.
        """
        # NOTE(review): ``query`` and ``filter`` are interpolated without URL
        # encoding; values containing characters such as "&" or "#" could
        # corrupt the query string — confirm whether callers pre-encode.
        url_suffix = get_from_env("", "AZURE_AI_SEARCH_URL_SUFFIX", DEFAULT_URL_SUFFIX)
        if url_suffix in self.service_name and "https://" in self.service_name:
            base_url = f"{self.service_name}/"
        elif url_suffix in self.service_name and "https://" not in self.service_name:
            base_url = f"https://{self.service_name}/"
        elif url_suffix not in self.service_name and "https://" in self.service_name:
            base_url = f"{self.service_name}.{url_suffix}/"
        elif (
            url_suffix not in self.service_name and "https://" not in self.service_name
        ):
            base_url = f"https://{self.service_name}.{url_suffix}/"
        else:
            # pass to Azure to throw a specific error
            base_url = self.service_name
        endpoint_path = f"indexes/{self.index_name}/docs?api-version={self.api_version}"
        top_param = f"&$top={self.top_k}" if self.top_k else ""
        filter_param = f"&$filter={self.filter}" if self.filter else ""
        return base_url + endpoint_path + f"&search={query}" + top_param + filter_param
    @property
    def _headers(self) -> Dict[str, str]:
        # Shared request headers for both the sync and async paths.
        return {
            "Content-Type": "application/json",
            "api-key": self.api_key,
        }
    def _search(self, query: str) -> List[dict]:
        """Synchronously execute the search and return the raw result dicts."""
        search_url = self._build_search_url(query)
        response = requests.get(search_url, headers=self._headers)
        if response.status_code != 200:
            raise Exception(f"Error in search request: {response}")
        return json.loads(response.text)["value"]
    async def _asearch(self, query: str) -> List[dict]:
        """Asynchronously execute the search and return the raw result dicts."""
        search_url = self._build_search_url(query)
        # Reuse the caller-provided session when available; otherwise open a
        # short-lived session for this single request.
        if not self.aiosession:
            async with aiohttp.ClientSession() as session:
                async with session.get(search_url, headers=self._headers) as response:
                    response_json = await response.json()
        else:
            async with self.aiosession.get(
                search_url, headers=self._headers
            ) as response:
                response_json = await response.json()
        return response_json["value"]
    def _get_relevant_documents(
        self, query: str, *, run_manager: CallbackManagerForRetrieverRun
    ) -> List[Document]:
        """Return search hits as Documents; ``content_key`` becomes page_content."""
        search_results = self._search(query)
        return [
            Document(page_content=result.pop(self.content_key), metadata=result)
            for result in search_results
        ]
    async def _aget_relevant_documents(
        self, query: str, *, run_manager: AsyncCallbackManagerForRetrieverRun
    ) -> List[Document]:
        """Async counterpart of ``_get_relevant_documents``."""
        search_results = await self._asearch(query)
        return [
            Document(page_content=result.pop(self.content_key), metadata=result)
            for result in search_results
        ]
# For backwards compatibility
class AzureCognitiveSearchRetriever(AzureAISearchRetriever):
    """`Azure Cognitive Search` service retriever.

    Deprecated alias kept only for backwards compatibility. This version of
    the retriever will soon be deprecated. Please switch to
    AzureAISearchRetriever.
    """
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/retrievers/wikipedia.py | from typing import List
from langchain_core.callbacks import CallbackManagerForRetrieverRun
from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever
from langchain_community.utilities.wikipedia import WikipediaAPIWrapper
class WikipediaRetriever(BaseRetriever, WikipediaAPIWrapper):
    """`Wikipedia API` retriever.
    Setup:
        Install the ``wikipedia`` dependency:
        .. code-block:: bash
            pip install -U wikipedia
    Instantiate:
        .. code-block:: python
            from langchain_community.retrievers import WikipediaRetriever
            retriever = WikipediaRetriever()
    Usage:
        .. code-block:: python
            docs = retriever.invoke("TOKYO GHOUL")
            print(docs[0].page_content[:100])
        .. code-block:: none
            Tokyo Ghoul (Japanese: 東京喰種(トーキョーグール), Hepburn: Tōkyō Gūru) is a Japanese dark fantasy
    Use within a chain:
        .. code-block:: python
            from langchain_core.output_parsers import StrOutputParser
            from langchain_core.prompts import ChatPromptTemplate
            from langchain_core.runnables import RunnablePassthrough
            from langchain_openai import ChatOpenAI
            prompt = ChatPromptTemplate.from_template(
                \"\"\"Answer the question based only on the context provided.
            Context: {context}
            Question: {question}\"\"\"
            )
            llm = ChatOpenAI(model="gpt-3.5-turbo-0125")
            def format_docs(docs):
                return "\\n\\n".join(doc.page_content for doc in docs)
            chain = (
                {"context": retriever | format_docs, "question": RunnablePassthrough()}
                | prompt
                | llm
                | StrOutputParser()
            )
            chain.invoke(
                "Who is the main character in `Tokyo Ghoul` and does he transform into a ghoul?"
            )
        .. code-block:: none
            'The main character in Tokyo Ghoul is Ken Kaneki, who transforms into a ghoul after receiving an organ transplant from a ghoul named Rize.'
    """  # noqa: E501
    def _get_relevant_documents(
        self, query: str, *, run_manager: CallbackManagerForRetrieverRun
    ) -> List[Document]:
        """Look up ``query`` via the inherited WikipediaAPIWrapper ``load``."""
        return self.load(query=query)
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/retrievers/bedrock.py | from typing import Any, Dict, List, Optional
from langchain_core.callbacks import CallbackManagerForRetrieverRun
from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever
from pydantic import BaseModel, model_validator
class VectorSearchConfig(BaseModel, extra="allow"):  # type: ignore[call-arg]
    """Configuration for vector search.

    Serialized as the ``vectorSearchConfiguration`` member of the retrieval
    configuration sent to the Bedrock Retrieve API; extra keys are allowed
    and passed through verbatim (``extra="allow"``).
    """
    # Maximum number of retrieval results to return.
    numberOfResults: int = 4
class RetrievalConfig(BaseModel, extra="allow"):  # type: ignore[call-arg]
    """Configuration for retrieval.

    Serialized via ``.dict()`` and passed as ``retrievalConfiguration`` to
    the Bedrock Retrieve API; extra keys are allowed and passed through.
    """
    # Vector-search portion of the retrieval configuration.
    vectorSearchConfiguration: VectorSearchConfig
class AmazonKnowledgeBasesRetriever(BaseRetriever):
    """Amazon Bedrock Knowledge Bases retriever.
    See https://aws.amazon.com/bedrock/knowledge-bases for more info.
    Setup:
        Install ``langchain-aws``:
        .. code-block:: bash
            pip install -U langchain-aws
    Key init args:
        knowledge_base_id: Knowledge Base ID.
        region_name: The aws region e.g., `us-west-2`.
            Fallback to AWS_DEFAULT_REGION env variable or region specified in
            ~/.aws/config.
        credentials_profile_name: The name of the profile in the ~/.aws/credentials
            or ~/.aws/config files, which has either access keys or role information
            specified. If not specified, the default credential profile or, if on an
            EC2 instance, credentials from IMDS will be used.
        client: boto3 client for bedrock agent runtime.
        retrieval_config: Configuration for retrieval.
    Instantiate:
        .. code-block:: python
            from langchain_community.retrievers import AmazonKnowledgeBasesRetriever
            retriever = AmazonKnowledgeBasesRetriever(
                knowledge_base_id="<knowledge-base-id>",
                retrieval_config={
                    "vectorSearchConfiguration": {
                        "numberOfResults": 4
                    }
                },
            )
    Usage:
        .. code-block:: python
            query = "..."
            retriever.invoke(query)
    Use within a chain:
        .. code-block:: python
            from langchain_aws import ChatBedrockConverse
            from langchain_core.output_parsers import StrOutputParser
            from langchain_core.prompts import ChatPromptTemplate
            from langchain_core.runnables import RunnablePassthrough
            from langchain_openai import ChatOpenAI
            prompt = ChatPromptTemplate.from_template(
                \"\"\"Answer the question based only on the context provided.
            Context: {context}
            Question: {question}\"\"\"
            )
            llm = ChatBedrockConverse(
                model_id="anthropic.claude-3-5-sonnet-20240620-v1:0"
            )
            def format_docs(docs):
                return "\\n\\n".join(doc.page_content for doc in docs)
            chain = (
                {"context": retriever | format_docs, "question": RunnablePassthrough()}
                | prompt
                | llm
                | StrOutputParser()
            )
            chain.invoke("...")
    """  # noqa: E501
    knowledge_base_id: str
    region_name: Optional[str] = None
    credentials_profile_name: Optional[str] = None
    endpoint_url: Optional[str] = None
    client: Any
    retrieval_config: RetrievalConfig
    @model_validator(mode="before")
    @classmethod
    def create_client(cls, values: Dict[str, Any]) -> Any:
        """Create a boto3 ``bedrock-agent-runtime`` client unless one was given."""
        if values.get("client") is not None:
            return values
        try:
            import boto3
            from botocore.client import Config
            from botocore.exceptions import UnknownServiceError
            if values.get("credentials_profile_name"):
                session = boto3.Session(profile_name=values["credentials_profile_name"])
            else:
                # use default credentials
                session = boto3.Session()
            client_params = {
                "config": Config(
                    connect_timeout=120, read_timeout=120, retries={"max_attempts": 0}
                )
            }
            if values.get("region_name"):
                client_params["region_name"] = values["region_name"]
            if values.get("endpoint_url"):
                client_params["endpoint_url"] = values["endpoint_url"]
            values["client"] = session.client("bedrock-agent-runtime", **client_params)
            return values
        except ImportError:
            raise ImportError(
                "Could not import boto3 python package. "
                "Please install it with `pip install boto3`."
            )
        except UnknownServiceError as e:
            raise ImportError(
                "Ensure that you have installed the latest boto3 package "
                "that contains the API for `bedrock-runtime-agent`."
            ) from e
        except Exception as e:
            raise ValueError(
                "Could not load credentials to authenticate with AWS client. "
                "Please check that credentials in the specified "
                "profile name are valid."
            ) from e
    def _get_relevant_documents(
        self, query: str, *, run_manager: CallbackManagerForRetrieverRun
    ) -> List[Document]:
        """Call the Bedrock Retrieve API and map each result to a Document."""
        response = self.client.retrieve(
            retrievalQuery={"text": query.strip()},
            knowledgeBaseId=self.knowledge_base_id,
            retrievalConfiguration=self.retrieval_config.dict(),
        )
        results = response["retrievalResults"]
        documents = []
        for result in results:
            content = result["content"]["text"]
            result.pop("content")
            # Normalize the raw result into Document metadata: guarantee a
            # "score" key and rename Bedrock's "metadata" to "source_metadata"
            # so it does not clash with the Document's own metadata dict.
            if "score" not in result:
                result["score"] = 0
            if "metadata" in result:
                result["source_metadata"] = result.pop("metadata")
            documents.append(
                Document(
                    page_content=content,
                    metadata=result,
                )
            )
        return documents
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/retrievers/nanopq.py | from __future__ import annotations
import concurrent.futures
from typing import Any, Iterable, List, Optional
import numpy as np
from langchain_core.callbacks import CallbackManagerForRetrieverRun
from langchain_core.documents import Document
from langchain_core.embeddings import Embeddings
from langchain_core.retrievers import BaseRetriever
from pydantic import ConfigDict
def create_index(contexts: List[str], embeddings: Embeddings) -> np.ndarray:
    """
    Create an index of embeddings for a list of contexts.
    Args:
        contexts: List of contexts to embed.
        embeddings: Embeddings model to use.
    Returns:
        Index of embeddings.
    """
    # Embed each context concurrently; the futures list preserves the order
    # of ``contexts``, so rows line up with the input.
    with concurrent.futures.ThreadPoolExecutor() as pool:
        futures = [pool.submit(embeddings.embed_query, ctx) for ctx in contexts]
        rows = [future.result() for future in futures]
    return np.array(rows)
class NanoPQRetriever(BaseRetriever):
    """`NanoPQ` retriever.

    Retrieves documents by product-quantized (PQ) nearest-neighbor search
    over an in-memory embedding index, using the ``nanopq`` package.
    """
    embeddings: Embeddings
    """Embeddings model to use."""
    index: Any = None
    """Index of embeddings."""
    texts: List[str]
    """List of texts to index."""
    metadatas: Optional[List[dict]] = None
    """List of metadatas corresponding with each text."""
    k: int = 4
    """Number of results to return."""
    relevancy_threshold: Optional[float] = None
    """Threshold for relevancy."""
    subspace: int = 4
    """No of subspaces to be created, should be a multiple of embedding shape"""
    clusters: int = 128
    """No of clusters to be created"""
    model_config = ConfigDict(
        arbitrary_types_allowed=True,
    )
    @classmethod
    def from_texts(
        cls,
        texts: List[str],
        embeddings: Embeddings,
        metadatas: Optional[List[dict]] = None,
        **kwargs: Any,
    ) -> NanoPQRetriever:
        """Build a retriever by embedding ``texts`` into an in-memory index."""
        index = create_index(texts, embeddings)
        return cls(
            embeddings=embeddings,
            index=index,
            texts=texts,
            metadatas=metadatas,
            **kwargs,
        )
    @classmethod
    def from_documents(
        cls,
        documents: Iterable[Document],
        embeddings: Embeddings,
        **kwargs: Any,
    ) -> NanoPQRetriever:
        """Build a retriever from Documents (page_content + metadata pairs)."""
        texts, metadatas = zip(*((d.page_content, d.metadata) for d in documents))
        return cls.from_texts(
            texts=texts, embeddings=embeddings, metadatas=metadatas, **kwargs
        )
    def _get_relevant_documents(
        self, query: str, *, run_manager: CallbackManagerForRetrieverRun
    ) -> List[Document]:
        """Return the ``k`` indexed texts closest to ``query`` under PQ distance."""
        try:
            from nanopq import PQ
        except ImportError:
            raise ImportError(
                "Could not import nanopq, please install with `pip install " "nanopq`."
            )
        query_embeds = np.array(self.embeddings.embed_query(query))
        # Fit the product quantizer on the stored index. nanopq asserts on
        # invalid parameter combinations (e.g. more clusters than training
        # samples, or a subspace count that doesn't divide the embedding dim).
        try:
            pq = PQ(M=self.subspace, Ks=self.clusters, verbose=True).fit(
                self.index.astype("float32")
            )
        except AssertionError:
            error_message = (
                "Received params: training_sample={training_sample}, "
                "n_cluster={n_clusters}, subspace={subspace}, "
                "embedding_shape={embedding_shape}. Issue with the combination. "
                "Please retrace back to find the exact error"
            ).format(
                training_sample=self.index.shape[0],
                n_clusters=self.clusters,
                subspace=self.subspace,
                embedding_shape=self.index.shape[1],
            )
            raise RuntimeError(error_message)
        # Encode the corpus, build an asymmetric distance table for the query,
        # and rank every row by its approximate distance.
        index_code = pq.encode(vecs=self.index.astype("float32"))
        dt = pq.dtable(query=query_embeds.astype("float32"))
        dists = dt.adist(codes=index_code)
        sorted_ix = np.argsort(dists)
        top_k_results = [
            Document(
                page_content=self.texts[row],
                metadata=self.metadatas[row] if self.metadatas else {},
            )
            for row in sorted_ix[0 : self.k]
        ]
        return top_k_results
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/retrievers/kendra.py | import re
from abc import ABC, abstractmethod
from typing import (
Any,
Callable,
Dict,
List,
Literal,
Optional,
Sequence,
Union,
)
from langchain_core.callbacks import CallbackManagerForRetrieverRun
from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever
from pydantic import (
BaseModel,
Field,
model_validator,
validator,
)
from typing_extensions import Annotated
def clean_excerpt(excerpt: str) -> str:
    """Clean an excerpt from Kendra.
    Args:
        excerpt: The excerpt to clean.
    Returns:
        The cleaned excerpt.
    """
    # Empty (or otherwise falsy) excerpts pass through unchanged.
    if not excerpt:
        return excerpt
    # Collapse whitespace runs to single spaces, then drop the "..."
    # truncation markers.
    collapsed = re.sub(r"\s+", " ", excerpt)
    return collapsed.replace("...", "")
def combined_text(item: "ResultItem") -> str:
    """Combine a ResultItem title and excerpt into a single string.
    Args:
        item: the ResultItem of a Kendra search.
    Returns:
        A combined text of the title and excerpt of the given item.
    """
    parts = []
    title = item.get_title()
    if title:
        parts.append(f"Document Title: {title}\n")
    excerpt = clean_excerpt(item.get_excerpt())
    if excerpt:
        parts.append(f"Document Excerpt: \n{excerpt}\n")
    return "".join(parts)
DocumentAttributeValueType = Union[str, int, List[str], None]
"""Possible types of a DocumentAttributeValue.
Dates are also represented as str.
"""
# Unexpected keyword argument "extra" for "__init_subclass__" of "object"
class Highlight(BaseModel, extra="allow"):  # type: ignore[call-arg]
    """Information that highlights the keywords in the excerpt."""
    BeginOffset: int
    """The zero-based location in the excerpt where the highlight starts."""
    EndOffset: int
    """The zero-based location in the excerpt where the highlight ends."""
    # Optional response fields default to None: the API may omit them, and
    # pydantic v2 (used by this module) treats Optional fields without an
    # explicit default as required, which would reject such responses.
    TopAnswer: Optional[bool] = None
    """Indicates whether the result is the best one."""
    Type: Optional[str] = None
    """The highlight type: STANDARD or THESAURUS_SYNONYM."""
# Unexpected keyword argument "extra" for "__init_subclass__" of "object"
class TextWithHighLights(BaseModel, extra="allow"):  # type: ignore[call-arg]
    """Text with highlights."""
    Text: str
    """The text."""
    # Default to None: highlights may be omitted by the API, and pydantic v2
    # treats Optional fields without a default as required.
    Highlights: Optional[Any] = None
    """The highlights."""
# Unexpected keyword argument "extra" for "__init_subclass__" of "object"
class AdditionalResultAttributeValue(  # type: ignore[call-arg]
    BaseModel, extra="allow"
):
    """Value of an additional result attribute.

    Only the TEXT_WITH_HIGHLIGHTS_VALUE payload is modeled as a field here;
    any other keys are retained via ``extra="allow"``.
    """
    TextWithHighlightsValue: TextWithHighLights
    """The text with highlights value."""
# Unexpected keyword argument "extra" for "__init_subclass__" of "object"
class AdditionalResultAttribute(BaseModel, extra="allow"):  # type: ignore[call-arg]
    """Additional result attribute."""
    Key: str
    """The key of the attribute."""
    ValueType: Literal["TEXT_WITH_HIGHLIGHTS_VALUE"]
    """The type of the value."""
    Value: AdditionalResultAttributeValue
    """The value of the attribute."""
    def get_value_text(self) -> str:
        """Return the attribute's plain text value."""
        return self.Value.TextWithHighlightsValue.Text
# Unexpected keyword argument "extra" for "__init_subclass__" of "object"
class DocumentAttributeValue(BaseModel, extra="allow"):  # type: ignore[call-arg]
    """Value of a document attribute."""
    # Exactly one of the fields below is expected to be set. All default to
    # None: pydantic v2 treats Optional fields without a default as required,
    # which would make every variant mandatory at once.
    DateValue: Optional[str] = None
    """The date expressed as an ISO 8601 string."""
    LongValue: Optional[int] = None
    """The long value."""
    StringListValue: Optional[List[str]] = None
    """The string list value."""
    StringValue: Optional[str] = None
    """The string value."""
    @property
    def value(self) -> DocumentAttributeValueType:
        """The only defined document attribute value or None.
        According to Amazon Kendra, you can only provide one
        value for a document attribute.
        """
        # Compare against None explicitly: the previous truthiness checks
        # wrongly skipped valid falsy values such as LongValue == 0.
        if self.DateValue is not None:
            return self.DateValue
        if self.LongValue is not None:
            return self.LongValue
        if self.StringListValue is not None:
            return self.StringListValue
        if self.StringValue is not None:
            return self.StringValue
        return None
# Unexpected keyword argument "extra" for "__init_subclass__" of "object"
class DocumentAttribute(BaseModel, extra="allow"):  # type: ignore[call-arg]
    """Document attribute.

    A single Key/Value pair attached to a Kendra document.
    """
    Key: str
    """The key of the attribute."""
    Value: DocumentAttributeValue
    """The value of the attribute."""
# Unexpected keyword argument "extra" for "__init_subclass__" of "object"
class ResultItem(BaseModel, ABC, extra="allow"):  # type: ignore[call-arg]
    """Base class of a result item."""
    # Optional response fields default to None: the API may omit any of them,
    # and pydantic v2 treats Optional fields without an explicit default as
    # required, which would reject such responses.
    Id: Optional[str] = None
    """The ID of the relevant result item."""
    DocumentId: Optional[str] = None
    """The document ID."""
    DocumentURI: Optional[str] = None
    """The document URI."""
    DocumentAttributes: Optional[List[DocumentAttribute]] = []
    """The document attributes."""
    ScoreAttributes: Optional[dict] = None
    """The kendra score confidence"""
    @abstractmethod
    def get_title(self) -> str:
        """Document title."""
    @abstractmethod
    def get_excerpt(self) -> str:
        """Document excerpt or passage original content as retrieved by Kendra."""
    def get_additional_metadata(self) -> dict:
        """Document additional metadata dict.
        This returns any extra metadata except these:
            * result_id
            * document_id
            * source
            * title
            * excerpt
            * document_attributes
        """
        return {}
    def get_document_attributes_dict(self) -> Dict[str, DocumentAttributeValueType]:
        """Document attributes dict."""
        return {attr.Key: attr.Value.value for attr in (self.DocumentAttributes or [])}
    def get_score_attribute(self) -> str:
        """Document Score Confidence"""
        if self.ScoreAttributes is not None:
            return self.ScoreAttributes["ScoreConfidence"]
        else:
            return "NOT_AVAILABLE"
    def to_doc(
        self, page_content_formatter: Callable[["ResultItem"], str] = combined_text
    ) -> Document:
        """Converts this item to a Document."""
        page_content = page_content_formatter(self)
        metadata = self.get_additional_metadata()
        metadata.update(
            {
                "result_id": self.Id,
                "document_id": self.DocumentId,
                "source": self.DocumentURI,
                "title": self.get_title(),
                "excerpt": self.get_excerpt(),
                "document_attributes": self.get_document_attributes_dict(),
                "score": self.get_score_attribute(),
            }
        )
        return Document(page_content=page_content, metadata=metadata)
class QueryResultItem(ResultItem):
    """Query API result item."""
    DocumentTitle: TextWithHighLights
    """The document title."""
    # Optional response fields default to None: the API may omit them, and
    # pydantic v2 treats Optional fields without a default as required.
    FeedbackToken: Optional[str] = None
    """Identifies a particular result from a particular query."""
    Format: Optional[str] = None
    """
    If the Type is ANSWER, then format is either:
        * TABLE: a table excerpt is returned in TableExcerpt;
        * TEXT: a text excerpt is returned in DocumentExcerpt.
    """
    Type: Optional[str] = None
    """Type of result: DOCUMENT or QUESTION_ANSWER or ANSWER"""
    AdditionalAttributes: Optional[List[AdditionalResultAttribute]] = []
    """One or more additional attributes associated with the result."""
    DocumentExcerpt: Optional[TextWithHighLights] = None
    """Excerpt of the document text."""
    def get_title(self) -> str:
        """Document title text."""
        return self.DocumentTitle.Text
    def get_attribute_value(self) -> str:
        """Text of the first additional attribute, or "" when absent."""
        if not self.AdditionalAttributes:
            return ""
        if not self.AdditionalAttributes[0]:
            return ""
        else:
            return self.AdditionalAttributes[0].get_value_text()
    def get_excerpt(self) -> str:
        """Prefer the AnswerText attribute, then the document excerpt, then ""."""
        if (
            self.AdditionalAttributes
            and self.AdditionalAttributes[0].Key == "AnswerText"
        ):
            excerpt = self.get_attribute_value()
        elif self.DocumentExcerpt:
            excerpt = self.DocumentExcerpt.Text
        else:
            excerpt = ""
        return excerpt
    def get_additional_metadata(self) -> dict:
        """Expose the result type as extra Document metadata."""
        additional_metadata = {"type": self.Type}
        return additional_metadata
class RetrieveResultItem(ResultItem):
    """Retrieve API result item."""
    DocumentTitle: Optional[str]
    """The document title."""
    Content: Optional[str]
    """The content of the item."""
    def get_title(self) -> str:
        """Document title, defaulting to an empty string."""
        if self.DocumentTitle:
            return self.DocumentTitle
        return ""
    def get_excerpt(self) -> str:
        """Item content, defaulting to an empty string."""
        if self.Content:
            return self.Content
        return ""
# Unexpected keyword argument "extra" for "__init_subclass__" of "object"
class QueryResult(BaseModel, extra="allow"):  # type: ignore[call-arg]
    """`Amazon Kendra Query API` search result.
    It is composed of:
        * Relevant suggested answers: either a text excerpt or table excerpt.
        * Matching FAQs or questions-answer from your FAQ file.
        * Documents including an excerpt of each document with its title.
    ``extra="allow"`` keeps any additional response fields Kendra returns.
    """
    ResultItems: List[QueryResultItem]
    """The result items."""
# Unexpected keyword argument "extra" for "__init_subclass__" of "object"
class RetrieveResult(BaseModel, extra="allow"):  # type: ignore[call-arg]
    """`Amazon Kendra Retrieve API` search result.
    It is composed of:
        * relevant passages or text excerpts given an input query.
    ``extra="allow"`` keeps any additional response fields Kendra returns.
    """
    QueryId: str
    """The ID of the query."""
    ResultItems: List[RetrieveResultItem]
    """The result items."""
# Maps Kendra's categorical ScoreConfidence buckets onto numeric values so
# results can be compared against a float threshold (used by
# AmazonKendraRetriever.min_score_confidence filtering).
KENDRA_CONFIDENCE_MAPPING = {
    "NOT_AVAILABLE": 0.0,
    "LOW": 0.25,
    "MEDIUM": 0.50,
    "HIGH": 0.75,
    "VERY_HIGH": 1.0,
}
class AmazonKendraRetriever(BaseRetriever):
    """`Amazon Kendra Index` retriever.
    Args:
        index_id: Kendra index id
        region_name: The aws region e.g., `us-west-2`.
            Fallsback to AWS_DEFAULT_REGION env variable
            or region specified in ~/.aws/config.
        credentials_profile_name: The name of the profile in the ~/.aws/credentials
            or ~/.aws/config files, which has either access keys or role information
            specified. If not specified, the default credential profile or, if on an
            EC2 instance, credentials from IMDS will be used.
        top_k: No of results to return
        attribute_filter: Additional filtering of results based on metadata
            See: https://docs.aws.amazon.com/kendra/latest/APIReference
        document_relevance_override_configurations: Overrides relevance tuning
            configurations of fields/attributes set at the index level
            See: https://docs.aws.amazon.com/kendra/latest/APIReference
        page_content_formatter: generates the Document page_content
            allowing access to all result item attributes. By default, it uses
            the item's title and excerpt.
        client: boto3 client for Kendra
        user_context: Provides information about the user context
            See: https://docs.aws.amazon.com/kendra/latest/APIReference
    Example:
        .. code-block:: python
            retriever = AmazonKendraRetriever(
                index_id="c0806df7-e76b-4bce-9b5c-d5582f6b1a03"
            )
    """
    index_id: str
    region_name: Optional[str] = None
    credentials_profile_name: Optional[str] = None
    top_k: int = 3
    attribute_filter: Optional[Dict] = None
    document_relevance_override_configurations: Optional[List[Dict]] = None
    page_content_formatter: Callable[[ResultItem], str] = combined_text
    client: Any
    user_context: Optional[Dict] = None
    # Minimum normalized confidence (see KENDRA_CONFIDENCE_MAPPING) a result
    # must have to be returned; None disables the filter.
    min_score_confidence: Annotated[Optional[float], Field(ge=0.0, le=1.0)]
    @validator("top_k")
    def validate_top_k(cls, value: int) -> int:
        """Reject negative ``top_k`` values."""
        if value < 0:
            raise ValueError(f"top_k ({value}) cannot be negative.")
        return value
    @model_validator(mode="before")
    @classmethod
    def create_client(cls, values: Dict[str, Any]) -> Any:
        """Build a boto3 Kendra client unless one was supplied explicitly.

        Runs before field validation, hence the duplicated ``top_k`` check
        (the ``@validator`` above covers clients provided directly).
        """
        top_k = values.get("top_k")
        if top_k is not None and top_k < 0:
            raise ValueError(f"top_k ({top_k}) cannot be negative.")
        if values.get("client") is not None:
            return values
        try:
            import boto3
            if values.get("credentials_profile_name"):
                session = boto3.Session(profile_name=values["credentials_profile_name"])
            else:
                # use default credentials
                session = boto3.Session()
            client_params = {}
            if values.get("region_name"):
                client_params["region_name"] = values["region_name"]
            values["client"] = session.client("kendra", **client_params)
            return values
        except ImportError:
            raise ImportError(
                "Could not import boto3 python package. "
                "Please install it with `pip install boto3`."
            )
        except Exception as e:
            # Any other failure here is almost always a credentials problem
            # (bad profile, expired keys), so surface it as such.
            raise ValueError(
                "Could not load credentials to authenticate with AWS client. "
                "Please check that credentials in the specified "
                "profile name are valid."
            ) from e
    def _kendra_query(self, query: str) -> Sequence[ResultItem]:
        """Call the Retrieve API, falling back to Query when it yields nothing."""
        kendra_kwargs = {
            "IndexId": self.index_id,
            # truncate the query to ensure that
            # there is no validation exception from Kendra.
            "QueryText": query.strip()[0:999],
            "PageSize": self.top_k,
        }
        if self.attribute_filter is not None:
            kendra_kwargs["AttributeFilter"] = self.attribute_filter
        if self.document_relevance_override_configurations is not None:
            kendra_kwargs["DocumentRelevanceOverrideConfigurations"] = (
                self.document_relevance_override_configurations
            )
        if self.user_context is not None:
            kendra_kwargs["UserContext"] = self.user_context
        response = self.client.retrieve(**kendra_kwargs)
        r_result = RetrieveResult.parse_obj(response)
        if r_result.ResultItems:
            return r_result.ResultItems
        # Retrieve API returned 0 results, fall back to Query API
        response = self.client.query(**kendra_kwargs)
        q_result = QueryResult.parse_obj(response)
        return q_result.ResultItems
    def _get_top_k_docs(self, result_items: Sequence[ResultItem]) -> List[Document]:
        """Convert the first ``top_k`` result items into Documents."""
        top_docs = [
            item.to_doc(self.page_content_formatter)
            for item in result_items[: self.top_k]
        ]
        return top_docs
    def _filter_by_score_confidence(self, docs: List[Document]) -> List[Document]:
        """
        Keep only the documents whose score confidence meets or exceeds
        ``min_score_confidence``; a no-op when no threshold is set.
        Unknown/missing score strings map to 0.0 and are filtered out.
        """
        if not self.min_score_confidence:
            return docs
        filtered_docs = [
            item
            for item in docs
            if (
                item.metadata.get("score") is not None
                and isinstance(item.metadata["score"], str)
                and KENDRA_CONFIDENCE_MAPPING.get(item.metadata["score"], 0.0)
                >= self.min_score_confidence
            )
        ]
        return filtered_docs
    def _get_relevant_documents(
        self,
        query: str,
        *,
        run_manager: CallbackManagerForRetrieverRun,
    ) -> List[Document]:
        """Run search on Kendra index and get top k documents
        Example:
        .. code-block:: python
            docs = retriever.invoke('This is my query')
        """
        result_items = self._kendra_query(query)
        top_k_docs = self._get_top_k_docs(result_items)
        return self._filter_by_score_confidence(top_k_docs)
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/retrievers/arxiv.py | from typing import List
from langchain_core.callbacks import CallbackManagerForRetrieverRun
from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever
from langchain_community.utilities.arxiv import ArxivAPIWrapper
class ArxivRetriever(BaseRetriever, ArxivAPIWrapper):
    """`Arxiv` retriever.
    Setup:
        Install ``arxiv``:
        .. code-block:: bash
            pip install -U arxiv
    Key init args:
        load_max_docs: int
            maximum number of documents to load
        get_full_documents: bool
            whether to return full document text or snippets
    Instantiate:
        .. code-block:: python
            from langchain_community.retrievers import ArxivRetriever
            retriever = ArxivRetriever(
                load_max_docs=2,
                get_full_documents=True,
            )
    Usage:
        .. code-block:: python
            docs = retriever.invoke("What is the ImageBind model?")
            docs[0].metadata
        .. code-block:: none
            {'Entry ID': 'http://arxiv.org/abs/2305.05665v2',
             'Published': datetime.date(2023, 5, 31),
             'Title': 'ImageBind: One Embedding Space To Bind Them All',
             'Authors': 'Rohit Girdhar, Alaaeldin El-Nouby, Zhuang Liu, Mannat Singh, Kalyan Vasudev Alwala, Armand Joulin, Ishan Misra'}
    Use within a chain:
        .. code-block:: python
            from langchain_core.output_parsers import StrOutputParser
            from langchain_core.prompts import ChatPromptTemplate
            from langchain_core.runnables import RunnablePassthrough
            from langchain_openai import ChatOpenAI
            prompt = ChatPromptTemplate.from_template(
                \"\"\"Answer the question based only on the context provided.
            Context: {context}
            Question: {question}\"\"\"
            )
            llm = ChatOpenAI(model="gpt-3.5-turbo-0125")
            def format_docs(docs):
                return "\\n\\n".join(doc.page_content for doc in docs)
            chain = (
                {"context": retriever | format_docs, "question": RunnablePassthrough()}
                | prompt
                | llm
                | StrOutputParser()
            )
            chain.invoke("What is the ImageBind model?")
        .. code-block:: none
            'The ImageBind model is an approach to learn a joint embedding across six different modalities - images, text, audio, depth, thermal, and IMU data...'
    """  # noqa: E501
    # When True, fetch and return full paper text; otherwise return
    # abstract-level summaries as documents.
    get_full_documents: bool = False
    def _get_relevant_documents(
        self, query: str, *, run_manager: CallbackManagerForRetrieverRun
    ) -> List[Document]:
        """Search arXiv and return either full papers or summary documents."""
        if self.get_full_documents:
            return self.load(query=query)
        else:
            return self.get_summaries_as_docs(query)
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/retrievers/metal.py | from typing import Any, List, Optional
from langchain_core.callbacks import CallbackManagerForRetrieverRun
from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever
from pydantic import model_validator
class MetalRetriever(BaseRetriever):
    """`Metal API` retriever."""
    client: Any
    """The Metal client to use."""
    params: Optional[dict] = None
    """The parameters to pass to the Metal client."""
    @model_validator(mode="before")
    @classmethod
    def validate_client(cls, values: dict) -> Any:
        """Validate that the client is of the correct type."""
        from metal_sdk.metal import Metal
        if "client" in values:
            candidate = values["client"]
            if not isinstance(candidate, Metal):
                raise ValueError(
                    "Got unexpected client, should be of type metal_sdk.metal.Metal. "
                    f"Instead, got {type(candidate)}"
                )
        # Default the search params to an empty dict when not provided.
        values["params"] = values.get("params", {})
        return values
    def _get_relevant_documents(
        self, query: str, *, run_manager: CallbackManagerForRetrieverRun
    ) -> List[Document]:
        """Search Metal for ``query`` and convert each hit to a Document."""
        response = self.client.search({"text": query}, **self.params)
        documents: List[Document] = []
        for record in response["data"]:
            # Everything except the text payload becomes metadata.
            extra = {key: value for key, value in record.items() if key != "text"}
            documents.append(Document(page_content=record["text"], metadata=extra))
        return documents
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/retrievers/google_vertex_ai_search.py | """Retriever wrapper for Google Vertex AI Search."""
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence, Tuple
from langchain_core._api.deprecation import deprecated
from langchain_core.callbacks import CallbackManagerForRetrieverRun
from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever
from langchain_core.utils import get_from_dict_or_env
from pydantic import BaseModel, ConfigDict, Field, model_validator
from langchain_community.utilities.vertexai import get_client_info
if TYPE_CHECKING:
from google.api_core.client_options import ClientOptions
from google.cloud.discoveryengine_v1beta import SearchRequest, SearchResult
class _BaseGoogleVertexAISearchRetriever(BaseModel):
    """Shared configuration and response-conversion helpers for the Vertex AI
    Search retrievers defined below."""
    project_id: str
    """Google Cloud Project ID."""
    data_store_id: Optional[str] = None
    """Vertex AI Search data store ID."""
    search_engine_id: Optional[str] = None
    """Vertex AI Search app ID."""
    location_id: str = "global"
    """Vertex AI Search data store location."""
    serving_config_id: str = "default_config"
    """Vertex AI Search serving config ID."""
    credentials: Any = None
    """The default custom credentials (google.auth.credentials.Credentials) to use
    when making API calls. If not provided, credentials will be ascertained from
    the environment."""
    engine_data_type: int = Field(default=0, ge=0, le=3)
    """ Defines the Vertex AI Search app data type
    0 - Unstructured data
    1 - Structured data
    2 - Website data
    3 - Blended search
    """
    @model_validator(mode="before")
    @classmethod
    def validate_environment(cls, values: Dict) -> Any:
        """Validates the environment.

        Ensures the optional Google client libraries are importable and
        resolves project/data-store/engine IDs from kwargs or env variables.
        """
        try:
            from google.cloud import discoveryengine_v1beta  # noqa: F401
        except ImportError as exc:
            raise ImportError(
                "google.cloud.discoveryengine is not installed."
                "Please install it with pip install "
                "google-cloud-discoveryengine>=0.11.10"
            ) from exc
        try:
            from google.api_core.exceptions import InvalidArgument  # noqa: F401
        except ImportError as exc:
            raise ImportError(
                "google.api_core.exceptions is not installed. "
                "Please install it with pip install google-api-core"
            ) from exc
        values["project_id"] = get_from_dict_or_env(values, "project_id", "PROJECT_ID")
        try:
            # Both IDs are optional at this level; subclasses enforce which
            # one is required, so missing values are tolerated here.
            values["data_store_id"] = get_from_dict_or_env(
                values, "data_store_id", "DATA_STORE_ID"
            )
            values["search_engine_id"] = get_from_dict_or_env(
                values, "search_engine_id", "SEARCH_ENGINE_ID"
            )
        except Exception:
            pass
        return values
    @property
    def client_options(self) -> "ClientOptions":
        """Client options pointing at the regional API endpoint (None for global)."""
        from google.api_core.client_options import ClientOptions
        return ClientOptions(
            api_endpoint=(
                f"{self.location_id}-discoveryengine.googleapis.com"
                if self.location_id != "global"
                else None
            )
        )
    def _convert_structured_search_response(
        self, results: Sequence[SearchResult]
    ) -> List[Document]:
        """Converts a sequence of search results to a list of LangChain documents."""
        import json
        from google.protobuf.json_format import MessageToDict
        documents: List[Document] = []
        for result in results:
            document_dict = MessageToDict(
                result.document._pb, preserving_proto_field_name=True
            )
            # Structured results carry their payload in struct_data; serialize
            # it whole as the page content.
            documents.append(
                Document(
                    page_content=json.dumps(document_dict.get("struct_data", {})),
                    metadata={"id": document_dict["id"], "name": document_dict["name"]},
                )
            )
        return documents
    def _convert_unstructured_search_response(
        self, results: Sequence[SearchResult], chunk_type: str
    ) -> List[Document]:
        """Converts a sequence of search results to a list of LangChain documents.

        ``chunk_type`` selects which derived field ("extractive_answers" or
        "extractive_segments") becomes the page content; one Document is
        emitted per chunk.
        """
        from google.protobuf.json_format import MessageToDict
        documents: List[Document] = []
        for result in results:
            document_dict = MessageToDict(
                result.document._pb, preserving_proto_field_name=True
            )
            derived_struct_data = document_dict.get("derived_struct_data")
            if not derived_struct_data:
                continue
            doc_metadata = document_dict.get("struct_data", {})
            doc_metadata["id"] = document_dict["id"]
            if chunk_type not in derived_struct_data:
                continue
            for chunk in derived_struct_data[chunk_type]:
                chunk_metadata = doc_metadata.copy()
                chunk_metadata["source"] = derived_struct_data.get("link", "")
                if chunk_type == "extractive_answers":
                    # Append the page number so the source pinpoints the answer.
                    chunk_metadata["source"] += f":{chunk.get('pageNumber', '')}"
                documents.append(
                    Document(
                        page_content=chunk.get("content", ""), metadata=chunk_metadata
                    )
                )
        return documents
    def _convert_website_search_response(
        self, results: Sequence[SearchResult], chunk_type: str
    ) -> List[Document]:
        """Converts a sequence of search results to a list of LangChain documents.

        For website data stores ``chunk_type`` is either "snippets" or
        "extractive_answers"; snippets store their text under "snippet".
        """
        from google.protobuf.json_format import MessageToDict
        documents: List[Document] = []
        for result in results:
            document_dict = MessageToDict(
                result.document._pb, preserving_proto_field_name=True
            )
            derived_struct_data = document_dict.get("derived_struct_data")
            if not derived_struct_data:
                continue
            doc_metadata = document_dict.get("struct_data", {})
            doc_metadata["id"] = document_dict["id"]
            doc_metadata["source"] = derived_struct_data.get("link", "")
            if chunk_type not in derived_struct_data:
                continue
            text_field = "snippet" if chunk_type == "snippets" else "content"
            for chunk in derived_struct_data[chunk_type]:
                documents.append(
                    Document(
                        page_content=chunk.get(text_field, ""), metadata=doc_metadata
                    )
                )
        if not documents:
            print(f"No {chunk_type} could be found.")  # noqa: T201
            if chunk_type == "extractive_answers":
                print(  # noqa: T201
                    "Make sure that your data store is using Advanced Website "
                    "Indexing.\n"
                    "https://cloud.google.com/generative-ai-app-builder/docs/about-advanced-features#advanced-website-indexing"
                )
        return documents
@deprecated(
    since="0.0.33",
    removal="1.0",
    alternative_import="langchain_google_community.VertexAISearchRetriever",
)
class GoogleVertexAISearchRetriever(BaseRetriever, _BaseGoogleVertexAISearchRetriever):
    """`Google Vertex AI Search` retriever.
    For a detailed explanation of the Vertex AI Search concepts
    and configuration parameters, refer to the product documentation.
    https://cloud.google.com/generative-ai-app-builder/docs/enterprise-search-introduction
    """
    filter: Optional[str] = None
    """Filter expression."""
    get_extractive_answers: bool = False
    """If True return Extractive Answers, otherwise return Extractive Segments or Snippets."""  # noqa: E501
    max_documents: int = Field(default=5, ge=1, le=100)
    """The maximum number of documents to return."""
    max_extractive_answer_count: int = Field(default=1, ge=1, le=5)
    """The maximum number of extractive answers returned in each search result.
    At most 5 answers will be returned for each SearchResult.
    """
    max_extractive_segment_count: int = Field(default=1, ge=1, le=1)
    """The maximum number of extractive segments returned in each search result.
    Currently one segment will be returned for each SearchResult.
    """
    query_expansion_condition: int = Field(default=1, ge=0, le=2)
    """Specification to determine under which conditions query expansion should occur.
    0 - Unspecified query expansion condition. In this case, server behavior defaults
        to disabled
    1 - Disabled query expansion. Only the exact search query is used, even if
        SearchResponse.total_size is zero.
    2 - Automatic query expansion built by the Search API.
    """
    spell_correction_mode: int = Field(default=2, ge=0, le=2)
    """Specification to determine under which conditions query expansion should occur.
    0 - Unspecified spell correction mode. In this case, server behavior defaults
        to auto.
    1 - Suggestion only. Search API will try to find a spell suggestion if there is any
        and put in the `SearchResponse.corrected_query`.
        The spell suggestion will not be used as the search query.
    2 - Automatic spell correction built by the Search API.
        Search will be based on the corrected query if found.
    """
    # type is SearchServiceClient but can't be set due to optional imports
    _client: Any = None
    _serving_config: str
    model_config = ConfigDict(
        arbitrary_types_allowed=True,
        extra="ignore",
    )
    def __init__(self, **kwargs: Any) -> None:
        """Initializes private fields.

        Builds the SearchServiceClient and resolves the serving config path
        from either search_engine_id (app-level) or data_store_id.
        """
        try:
            from google.cloud.discoveryengine_v1beta import SearchServiceClient
        except ImportError as exc:
            raise ImportError(
                "google.cloud.discoveryengine is not installed."
                "Please install it with pip install google-cloud-discoveryengine"
            ) from exc
        super().__init__(**kwargs)
        # For more information, refer to:
        # https://cloud.google.com/generative-ai-app-builder/docs/locations#specify_a_multi-region_for_your_data_store
        self._client = SearchServiceClient(
            credentials=self.credentials,
            client_options=self.client_options,
            client_info=get_client_info(module="vertex-ai-search"),
        )
        if self.engine_data_type == 3 and not self.search_engine_id:
            raise ValueError(
                "search_engine_id must be specified for blended search apps."
            )
        if self.search_engine_id:
            self._serving_config = f"projects/{self.project_id}/locations/{self.location_id}/collections/default_collection/engines/{self.search_engine_id}/servingConfigs/default_config"  # noqa: E501
        elif self.data_store_id:
            self._serving_config = self._client.serving_config_path(
                project=self.project_id,
                location=self.location_id,
                data_store=self.data_store_id,
                serving_config=self.serving_config_id,
            )
        else:
            raise ValueError(
                "Either data_store_id or search_engine_id must be specified."
            )
    def _create_search_request(self, query: str) -> SearchRequest:
        """Prepares a SearchRequest object.

        The content search spec depends on engine_data_type: extractive
        content for unstructured (0), none for structured (1), and
        extractive answers plus snippets for website/blended (2, 3).
        """
        from google.cloud.discoveryengine_v1beta import SearchRequest
        query_expansion_spec = SearchRequest.QueryExpansionSpec(
            condition=self.query_expansion_condition,
        )
        spell_correction_spec = SearchRequest.SpellCorrectionSpec(
            mode=self.spell_correction_mode
        )
        if self.engine_data_type == 0:
            if self.get_extractive_answers:
                extractive_content_spec = (
                    SearchRequest.ContentSearchSpec.ExtractiveContentSpec(
                        max_extractive_answer_count=self.max_extractive_answer_count,
                    )
                )
            else:
                extractive_content_spec = (
                    SearchRequest.ContentSearchSpec.ExtractiveContentSpec(
                        max_extractive_segment_count=self.max_extractive_segment_count,
                    )
                )
            content_search_spec = SearchRequest.ContentSearchSpec(
                extractive_content_spec=extractive_content_spec
            )
        elif self.engine_data_type == 1:
            content_search_spec = None
        elif self.engine_data_type in (2, 3):
            content_search_spec = SearchRequest.ContentSearchSpec(
                extractive_content_spec=SearchRequest.ContentSearchSpec.ExtractiveContentSpec(
                    max_extractive_answer_count=self.max_extractive_answer_count,
                ),
                snippet_spec=SearchRequest.ContentSearchSpec.SnippetSpec(
                    return_snippet=True
                ),
            )
        else:
            raise NotImplementedError(
                "Only data store type 0 (Unstructured), 1 (Structured),"
                "2 (Website), or 3 (Blended) are supported currently."
                + f" Got {self.engine_data_type}"
            )
        return SearchRequest(
            query=query,
            filter=self.filter,
            serving_config=self._serving_config,
            page_size=self.max_documents,
            content_search_spec=content_search_spec,
            query_expansion_spec=query_expansion_spec,
            spell_correction_spec=spell_correction_spec,
        )
    def _get_relevant_documents(
        self, query: str, *, run_manager: CallbackManagerForRetrieverRun
    ) -> List[Document]:
        """Get documents relevant for a query."""
        return self.get_relevant_documents_with_response(query)[0]
    def get_relevant_documents_with_response(
        self, query: str
    ) -> Tuple[List[Document], Any]:
        """Run the search and return (documents, raw SearchResponse)."""
        from google.api_core.exceptions import InvalidArgument
        search_request = self._create_search_request(query)
        try:
            response = self._client.search(search_request)
        except InvalidArgument as exc:
            # A common cause of InvalidArgument is a mismatched
            # engine_data_type; re-raise with a hint while keeping the type.
            raise type(exc)(
                exc.message
                + " This might be due to engine_data_type not set correctly."
            )
        if self.engine_data_type == 0:
            chunk_type = (
                "extractive_answers"
                if self.get_extractive_answers
                else "extractive_segments"
            )
            documents = self._convert_unstructured_search_response(
                response.results, chunk_type
            )
        elif self.engine_data_type == 1:
            documents = self._convert_structured_search_response(response.results)
        elif self.engine_data_type in (2, 3):
            chunk_type = (
                "extractive_answers" if self.get_extractive_answers else "snippets"
            )
            documents = self._convert_website_search_response(
                response.results, chunk_type
            )
        else:
            raise NotImplementedError(
                "Only data store type 0 (Unstructured), 1 (Structured),"
                "2 (Website), or 3 (Blended) are supported currently."
                + f" Got {self.engine_data_type}"
            )
        return documents, response
@deprecated(
    since="0.0.33",
    removal="1.0",
    alternative_import="langchain_google_community.VertexAIMultiTurnSearchRetriever",
)
class GoogleVertexAIMultiTurnSearchRetriever(
    BaseRetriever, _BaseGoogleVertexAISearchRetriever
):
    """`Google Vertex AI Search` retriever for multi-turn conversations."""
    conversation_id: str = "-"
    """Vertex AI Search Conversation ID."""
    # type is ConversationalSearchServiceClient but can't be set due to optional imports
    _client: Any = None
    _serving_config: str
    model_config = ConfigDict(
        arbitrary_types_allowed=True,
        extra="ignore",
    )
    def __init__(self, **kwargs: Any):
        """Build the conversational client and resolve the serving config.

        Requires ``data_store_id``; structured (1) and blended (3) data
        stores are not supported for multi-turn search.
        """
        super().__init__(**kwargs)
        from google.cloud.discoveryengine_v1beta import (
            ConversationalSearchServiceClient,
        )
        self._client = ConversationalSearchServiceClient(
            credentials=self.credentials,
            client_options=self.client_options,
            client_info=get_client_info(module="vertex-ai-search"),
        )
        if not self.data_store_id:
            raise ValueError("data_store_id is required for MultiTurnSearchRetriever.")
        self._serving_config = self._client.serving_config_path(
            project=self.project_id,
            location=self.location_id,
            data_store=self.data_store_id,
            serving_config=self.serving_config_id,
        )
        if self.engine_data_type == 1 or self.engine_data_type == 3:
            raise NotImplementedError(
                "Data store type 1 (Structured) and 3 (Blended)"
                "is not currently supported for multi-turn search."
                + f" Got {self.engine_data_type}"
            )
    def _get_relevant_documents(
        self, query: str, *, run_manager: CallbackManagerForRetrieverRun
    ) -> List[Document]:
        """Get documents relevant for a query."""
        from google.cloud.discoveryengine_v1beta import (
            ConverseConversationRequest,
            TextInput,
        )
        request = ConverseConversationRequest(
            name=self._client.conversation_path(
                self.project_id,
                self.location_id,
                self.data_store_id,
                self.conversation_id,
            ),
            serving_config=self._serving_config,
            query=TextInput(input=query),
        )
        response = self._client.converse_conversation(request)
        # Website stores (2) use the website converter; everything else
        # supported here is unstructured (0).
        if self.engine_data_type == 2:
            return self._convert_website_search_response(
                response.search_results, "extractive_answers"
            )
        return self._convert_unstructured_search_response(
            response.search_results, "extractive_answers"
        )
class GoogleCloudEnterpriseSearchRetriever(GoogleVertexAISearchRetriever):
    """`Google Vertex Search API` retriever alias for backwards compatibility.
    DEPRECATED: Use `GoogleVertexAISearchRetriever` instead.
    """
    def __init__(self, **data: Any):
        """Emit a deprecation warning, then delegate to the new class."""
        import warnings
        message = "GoogleCloudEnterpriseSearchRetriever is deprecated, use GoogleVertexAISearchRetriever"  # noqa: E501
        warnings.warn(message, DeprecationWarning)
        super().__init__(**data)
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/retrievers/knn.py | """KNN Retriever.
Largely based on
https://github.com/karpathy/randomfun/blob/master/knn_vs_svm.ipynb"""
from __future__ import annotations
import concurrent.futures
from typing import Any, Iterable, List, Optional
import numpy as np
from langchain_core.callbacks import CallbackManagerForRetrieverRun
from langchain_core.documents import Document
from langchain_core.embeddings import Embeddings
from langchain_core.retrievers import BaseRetriever
from pydantic import ConfigDict
def create_index(contexts: List[str], embeddings: Embeddings) -> np.ndarray:
    """
    Create an index of embeddings for a list of contexts.
    Args:
        contexts: List of contexts to embed.
        embeddings: Embeddings model to use.
    Returns:
        Index of embeddings.
    """
    # Embed the contexts concurrently; embed_query is typically I/O bound.
    with concurrent.futures.ThreadPoolExecutor() as pool:
        vectors = list(pool.map(embeddings.embed_query, contexts))
    return np.array(vectors)
class KNNRetriever(BaseRetriever):
    """`KNN` retriever.

    Ranks the indexed ``texts`` by cosine similarity between their embeddings
    and the query embedding, returning the top ``k`` matches (optionally
    filtered by a min-max-normalized relevancy threshold).
    """
    embeddings: Embeddings
    """Embeddings model to use."""
    index: Any = None
    """Index of embeddings."""
    texts: List[str]
    """List of texts to index."""
    metadatas: Optional[List[dict]] = None
    """List of metadatas corresponding with each text."""
    k: int = 4
    """Number of results to return."""
    relevancy_threshold: Optional[float] = None
    """Threshold for relevancy."""
    model_config = ConfigDict(
        arbitrary_types_allowed=True,
    )
    @classmethod
    def from_texts(
        cls,
        texts: List[str],
        embeddings: Embeddings,
        metadatas: Optional[List[dict]] = None,
        **kwargs: Any,
    ) -> KNNRetriever:
        """Build a retriever by embedding ``texts`` into an index.

        Args:
            texts: Texts to index.
            embeddings: Embeddings model used to embed the texts.
            metadatas: Optional per-text metadata dicts, parallel to ``texts``.
        """
        index = create_index(texts, embeddings)
        return cls(
            embeddings=embeddings,
            index=index,
            texts=texts,
            metadatas=metadatas,
            **kwargs,
        )
    @classmethod
    def from_documents(
        cls,
        documents: Iterable[Document],
        embeddings: Embeddings,
        **kwargs: Any,
    ) -> KNNRetriever:
        """Build a retriever from documents' page content and metadata.

        Handles an empty ``documents`` iterable explicitly: the previous
        ``zip(*...)`` unpacking raised ``ValueError`` when there were no
        documents.
        """
        pairs = [(d.page_content, d.metadata) for d in documents]
        if pairs:
            texts, metadatas = (list(part) for part in zip(*pairs))
        else:
            texts, metadatas = [], []
        return cls.from_texts(
            texts=texts, embeddings=embeddings, metadatas=metadatas, **kwargs
        )
    def _get_relevant_documents(
        self, query: str, *, run_manager: CallbackManagerForRetrieverRun
    ) -> List[Document]:
        """Return the top-k most similar texts as Documents.

        Similarities are cosine similarities; they are min-max normalized to
        [0, 1] before being compared against ``relevancy_threshold``.
        """
        query_embeds = np.array(self.embeddings.embed_query(query))
        # L2-normalize index rows and the query so the dot product below is
        # cosine similarity.
        index_embeds = self.index / np.sqrt((self.index**2).sum(1, keepdims=True))
        query_embeds = query_embeds / np.sqrt((query_embeds**2).sum())
        similarities = index_embeds.dot(query_embeds)
        sorted_ix = np.argsort(-similarities)
        # The epsilon guards against a zero range when all similarities are
        # equal.
        denominator = np.max(similarities) - np.min(similarities) + 1e-6
        normalized_similarities = (similarities - np.min(similarities)) / denominator
        top_k_results = [
            Document(
                page_content=self.texts[row],
                metadata=self.metadatas[row] if self.metadatas else {},
            )
            for row in sorted_ix[0 : self.k]
            if (
                self.relevancy_threshold is None
                or normalized_similarities[row] >= self.relevancy_threshold
            )
        ]
        return top_k_results
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/retrievers/tavily_search_api.py | import os
from enum import Enum
from typing import Any, Dict, List, Optional
from langchain_core.callbacks import CallbackManagerForRetrieverRun
from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever
class SearchDepth(Enum):
    """Search depth as enumerator.

    Controls how thorough a Tavily search is; the value is sent to the API
    as the ``search_depth`` parameter.
    """
    BASIC = "basic"
    ADVANCED = "advanced"
class TavilySearchAPIRetriever(BaseRetriever):
    """Tavily Search API retriever.
    Setup:
        Install ``langchain-community`` and set environment variable ``TAVILY_API_KEY``.
        .. code-block:: bash
            pip install -U langchain-community
            export TAVILY_API_KEY="your-api-key"
    Key init args:
        k: int
            Number of results to include.
        include_generated_answer: bool
            Include a generated answer with results
        include_raw_content: bool
            Include raw content with results.
        include_images: bool
            Return images in addition to text.
    Instantiate:
        .. code-block:: python
            from langchain_community.retrievers import TavilySearchAPIRetriever
            retriever = TavilySearchAPIRetriever(k=3)
    Usage:
        .. code-block:: python
            query = "what year was breath of the wild released?"
            retriever.invoke(query)
    Use within a chain:
        .. code-block:: python
            from langchain_core.output_parsers import StrOutputParser
            from langchain_core.prompts import ChatPromptTemplate
            from langchain_core.runnables import RunnablePassthrough
            from langchain_openai import ChatOpenAI
            prompt = ChatPromptTemplate.from_template(
                \"\"\"Answer the question based only on the context provided.
            Context: {context}
            Question: {question}\"\"\"
            )
            llm = ChatOpenAI(model="gpt-3.5-turbo-0125")
            def format_docs(docs):
                return "\n\n".join(doc.page_content for doc in docs)
            chain = (
                {"context": retriever | format_docs, "question": RunnablePassthrough()}
                | prompt
                | llm
                | StrOutputParser()
            )
            chain.invoke("how many units did breath of the wild sell in 2020")
    """  # noqa: E501
    # Number of results to return (one slot is used by the generated answer
    # when include_generated_answer is True).
    k: int = 10
    include_generated_answer: bool = False
    include_raw_content: bool = False
    include_images: bool = False
    search_depth: SearchDepth = SearchDepth.BASIC
    # Optional allow/deny lists of domains passed straight to the API.
    include_domains: Optional[List[str]] = None
    exclude_domains: Optional[List[str]] = None
    # Extra keyword arguments forwarded verbatim to TavilyClient.search.
    kwargs: Optional[Dict[str, Any]] = {}
    # API key; falls back to the TAVILY_API_KEY environment variable.
    api_key: Optional[str] = None
    def _get_relevant_documents(
        self, query: str, *, run_manager: CallbackManagerForRetrieverRun
    ) -> List[Document]:
        """Search Tavily and convert results (and optionally the generated
        answer) into Documents."""
        try:
            try:
                from tavily import TavilyClient
            except ImportError:
                # Older of tavily used Client
                from tavily import Client as TavilyClient
        except ImportError:
            raise ImportError(
                "Tavily python package not found. "
                "Please install it with `pip install tavily-python`."
            )
        tavily = TavilyClient(api_key=self.api_key or os.environ["TAVILY_API_KEY"])
        # Reserve one result slot for the generated answer, if requested.
        max_results = self.k if not self.include_generated_answer else self.k - 1
        response = tavily.search(
            query=query,
            max_results=max_results,
            search_depth=self.search_depth.value,
            include_answer=self.include_generated_answer,
            include_domains=self.include_domains,
            exclude_domains=self.exclude_domains,
            include_raw_content=self.include_raw_content,
            include_images=self.include_images,
            **self.kwargs,
        )
        docs = [
            Document(
                page_content=result.get("content", "")
                if not self.include_raw_content
                else result.get("raw_content", ""),
                metadata={
                    "title": result.get("title", ""),
                    "source": result.get("url", ""),
                    # Pass through any remaining per-result fields unchanged.
                    **{
                        k: v
                        for k, v in result.items()
                        if k not in ("content", "title", "url", "raw_content")
                    },
                    "images": response.get("images"),
                },
            )
            for result in response.get("results")
        ]
        if self.include_generated_answer:
            # Prepend the generated answer as a synthetic first document.
            docs = [
                Document(
                    page_content=response.get("answer", ""),
                    metadata={
                        "title": "Suggested Answer",
                        "source": "https://tavily.com/",
                    },
                ),
                *docs,
            ]
        return docs
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/retrievers/asknews.py | import os
import re
from typing import Any, Dict, List, Literal, Optional
from langchain_core.callbacks import (
AsyncCallbackManagerForRetrieverRun,
CallbackManagerForRetrieverRun,
)
from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever
class AskNewsRetriever(BaseRetriever):
    """AskNews retriever.

    Queries the AskNews news-search API and converts every returned article
    into a LangChain ``Document``.  Credentials are taken from ``client_id``
    and ``client_secret``, falling back to the ``ASKNEWS_CLIENT_ID`` and
    ``ASKNEWS_CLIENT_SECRET`` environment variables.
    """

    # Number of articles to request per query.
    k: int = 10
    # Pagination offset into the result set.
    offset: int = 0
    # Optional UNIX-timestamp bounds restricting the search window.
    start_timestamp: Optional[int] = None
    end_timestamp: Optional[int] = None
    # Search method: "nl" = natural language, "kw" = keyword.
    method: Literal["nl", "kw"] = "nl"
    # News categories the search is restricted to.
    categories: List[
        Literal[
            "All",
            "Business",
            "Crime",
            "Politics",
            "Science",
            "Sports",
            "Technology",
            "Military",
            "Health",
            "Entertainment",
            "Finance",
            "Culture",
            "Climate",
            "Environment",
            "World",
        ]
    ] = ["All"]
    # Whether to search the historical archive instead of recent news.
    historical: bool = False
    # Minimum similarity score for an article to be returned.
    similarity_score_threshold: float = 0.5
    # Extra keyword arguments forwarded verbatim to ``search_news``.
    # NOTE(review): mutable ``{}`` default — presumably safe because
    # BaseRetriever is a pydantic model that copies field defaults per
    # instance; confirm for this version.
    kwargs: Optional[Dict[str, Any]] = {}
    # API credentials; fall back to environment variables when unset.
    client_id: Optional[str] = None
    client_secret: Optional[str] = None

    def _get_relevant_documents(
        self, query: str, *, run_manager: CallbackManagerForRetrieverRun
    ) -> List[Document]:
        """Get documents relevant to a query.

        Args:
            query: String to find relevant documents for
            run_manager: The callbacks handler to use

        Returns:
            List of relevant documents

        Raises:
            ImportError: If the ``asknews`` package is not installed.
        """
        try:
            from asknews_sdk import AskNewsSDK
        except ImportError:
            raise ImportError(
                "AskNews python package not found. "
                "Please install it with `pip install asknews`."
            )

        an_client = AskNewsSDK(
            client_id=self.client_id or os.environ["ASKNEWS_CLIENT_ID"],
            client_secret=self.client_secret or os.environ["ASKNEWS_CLIENT_SECRET"],
            scopes=["news"],
        )
        # The <doc> delimiters let _extract_documents split the string form
        # of the response back into individual articles.
        response = an_client.news.search_news(
            query=query,
            n_articles=self.k,
            start_timestamp=self.start_timestamp,
            end_timestamp=self.end_timestamp,
            method=self.method,
            categories=self.categories,
            historical=self.historical,
            similarity_score_threshold=self.similarity_score_threshold,
            offset=self.offset,
            doc_start_delimiter="<doc>",
            doc_end_delimiter="</doc>",
            return_type="both",
            **self.kwargs,
        )
        return self._extract_documents(response)

    async def _aget_relevant_documents(
        self, query: str, *, run_manager: AsyncCallbackManagerForRetrieverRun
    ) -> List[Document]:
        """Asynchronously get documents relevant to a query.

        Args:
            query: String to find relevant documents for
            run_manager: The callbacks handler to use

        Returns:
            List of relevant documents

        Raises:
            ImportError: If the ``asknews`` package is not installed.
        """
        try:
            from asknews_sdk import AsyncAskNewsSDK
        except ImportError:
            raise ImportError(
                "AskNews python package not found. "
                "Please install it with `pip install asknews`."
            )

        an_client = AsyncAskNewsSDK(
            client_id=self.client_id or os.environ["ASKNEWS_CLIENT_ID"],
            client_secret=self.client_secret or os.environ["ASKNEWS_CLIENT_SECRET"],
            scopes=["news"],
        )
        response = await an_client.news.search_news(
            query=query,
            n_articles=self.k,
            start_timestamp=self.start_timestamp,
            end_timestamp=self.end_timestamp,
            method=self.method,
            categories=self.categories,
            historical=self.historical,
            similarity_score_threshold=self.similarity_score_threshold,
            offset=self.offset,
            return_type="both",
            doc_start_delimiter="<doc>",
            doc_end_delimiter="</doc>",
            **self.kwargs,
        )
        return self._extract_documents(response)

    def _extract_documents(self, response: Any) -> List[Document]:
        """Extract documents from an api response.

        Splits the response's string form on the <doc>...</doc> delimiters
        requested above and pairs each snippet positionally with the
        corresponding ``as_dicts`` entry for metadata.
        """
        from asknews_sdk.dto.news import SearchResponse

        sr: SearchResponse = response
        # DOTALL so article bodies spanning multiple lines are captured.
        matches = re.findall(r"<doc>(.*?)</doc>", sr.as_string, re.DOTALL)
        # NOTE(review): assumes matches and sr.as_dicts have equal length and
        # identical ordering — confirm against the asknews SDK contract.
        docs = [
            Document(
                page_content=matches[i].strip(),
                metadata={
                    "title": sr.as_dicts[i].title,
                    "source": str(sr.as_dicts[i].article_url)
                    if sr.as_dicts[i].article_url
                    else None,
                    "images": sr.as_dicts[i].image_url,
                },
            )
            for i in range(len(matches))
        ]
        return docs
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/retrievers/dria_index.py | """Wrapper around Dria Retriever."""
from typing import Any, List, Optional
from langchain_core.callbacks import CallbackManagerForRetrieverRun
from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever
from langchain_community.utilities import DriaAPIWrapper
class DriaRetriever(BaseRetriever):
    """`Dria` retriever using the DriaAPIWrapper."""

    api_wrapper: DriaAPIWrapper

    def __init__(self, api_key: str, contract_id: Optional[str] = None, **kwargs: Any):
        """Set up the retriever with a freshly built ``DriaAPIWrapper``.

        Args:
            api_key: The API key for Dria.
            contract_id: The contract ID of the knowledge base to interact with.
        """
        wrapper = DriaAPIWrapper(api_key=api_key, contract_id=contract_id)
        super().__init__(api_wrapper=wrapper, **kwargs)  # type: ignore[call-arg]

    def create_knowledge_base(
        self,
        name: str,
        description: str,
        category: str = "Unspecified",
        embedding: str = "jina",
    ) -> str:
        """Create a new knowledge base in Dria.

        Args:
            name: The name of the knowledge base.
            description: The description of the knowledge base.
            category: The category of the knowledge base.
            embedding: The embedding model to use for the knowledge base.

        Returns:
            The ID of the created knowledge base.
        """
        return self.api_wrapper.create_knowledge_base(
            name, description, category, embedding
        )

    def add_texts(
        self,
        texts: List,
    ) -> None:
        """Insert text/metadata records into the Dria knowledge base.

        Args:
            texts: An iterable of dicts carrying "text" and "metadata" keys.
        """
        payload = [
            {"text": item["text"], "metadata": item["metadata"]} for item in texts
        ]
        self.api_wrapper.insert_data(payload)

    def _get_relevant_documents(
        self, query: str, *, run_manager: CallbackManagerForRetrieverRun
    ) -> List[Document]:
        """Search Dria and wrap every hit in a ``Document``.

        Args:
            query: The query string to search for in the knowledge base.
            run_manager: Callback manager for the retriever run.

        Returns:
            A list of Documents containing the search results.
        """
        hits = self.api_wrapper.search(query)
        documents: List[Document] = []
        for hit in hits:
            documents.append(
                Document(
                    page_content=hit["metadata"],
                    metadata={"id": hit["id"], "score": hit["score"]},
                )
            )
        return documents
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/retrievers/kay.py | from __future__ import annotations
from typing import Any, List
from langchain_core.callbacks import CallbackManagerForRetrieverRun
from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever
class KayAiRetriever(BaseRetriever):
    """Retriever for Kay.ai datasets.

    Expects the KAY_API_KEY env variable to be set; a free key is available
    at https://kay.ai/.
    """

    client: Any
    num_contexts: int

    @classmethod
    def create(
        cls,
        dataset_id: str,
        data_types: List[str],
        num_contexts: int = 6,
    ) -> KayAiRetriever:
        """Build a retriever from a Kay dataset id and its datasources.

        Args:
            dataset_id: A dataset id category in Kay, like "company".
            data_types: Datasources within the dataset; for "company" these
                could be ["10-K", "10-Q", "8-K", "PressRelease"].
            num_contexts: How many documents each query should return.
                Defaults to 6.
        """
        try:
            from kay.rag.retrievers import KayRetriever
        except ImportError:
            raise ImportError(
                "Could not import kay python package. Please install it with "
                "`pip install kay`.",
            )
        kay_client = KayRetriever(dataset_id, data_types)
        return cls(client=kay_client, num_contexts=num_contexts)

    def _get_relevant_documents(
        self, query: str, *, run_manager: CallbackManagerForRetrieverRun
    ) -> List[Document]:
        """Query Kay and keep only contexts that carry embeddable text."""
        contexts = self.client.query(query=query, num_context=self.num_contexts)
        documents = []
        for context in contexts:
            text = context.pop("chunk_embed_text", None)
            if text is None:
                # Contexts without text cannot become documents — skip them.
                continue
            documents.append(Document(page_content=text, metadata={**context}))
        return documents
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/retrievers/docarray.py | from enum import Enum
from typing import Any, Dict, List, Optional, Union
import numpy as np
from langchain_core.callbacks import CallbackManagerForRetrieverRun
from langchain_core.documents import Document
from langchain_core.embeddings import Embeddings
from langchain_core.retrievers import BaseRetriever
from langchain_core.utils.pydantic import get_fields
from pydantic import ConfigDict
from langchain_community.vectorstores.utils import maximal_marginal_relevance
class SearchType(str, Enum):
    """The kinds of search this retriever can perform."""

    # Plain vector-similarity search.
    similarity = "similarity"
    # Maximal-marginal-relevance re-ranking of similarity results.
    mmr = "mmr"
class DocArrayRetriever(BaseRetriever):
    """`DocArray Document Indices` retriever.

    Currently, it supports 5 backends:
    InMemoryExactNNIndex, HnswDocumentIndex, QdrantDocumentIndex,
    ElasticDocIndex, and WeaviateDocumentIndex.

    Args:
        index: One of the above-mentioned index instances
        embeddings: Embedding model to represent text as vectors
        search_field: Field to consider for searching in the documents.
            Should be an embedding/vector/tensor.
        content_field: Field that represents the main content in your document schema.
            Will be used as a `page_content`. Everything else will go into `metadata`.
        search_type: Type of search to perform (similarity / mmr)
        filters: Filters applied for document retrieval.
        top_k: Number of documents to return
    """

    # DocArray document-index backend (one of the 5 supported classes).
    index: Any = None
    # Embedding model used to vectorize the incoming query string.
    embeddings: Embeddings
    search_field: str
    content_field: str
    search_type: SearchType = SearchType.similarity
    top_k: int = 1
    # Backend-specific filter expression; its shape depends on the index type.
    filters: Optional[Any] = None

    # Index objects are arbitrary (non-pydantic) types.
    model_config = ConfigDict(
        arbitrary_types_allowed=True,
    )

    def _get_relevant_documents(
        self,
        query: str,
        *,
        run_manager: CallbackManagerForRetrieverRun,
    ) -> List[Document]:
        """Get documents relevant for a query.

        Args:
            query: string to find relevant documents for

        Returns:
            List of relevant documents

        Raises:
            ValueError: If ``search_type`` is neither similarity nor mmr.
        """
        query_emb = np.array(self.embeddings.embed_query(query))

        if self.search_type == SearchType.similarity:
            results = self._similarity_search(query_emb)
        elif self.search_type == SearchType.mmr:
            results = self._mmr_search(query_emb)
        else:
            raise ValueError(
                f"Search type {self.search_type} does not exist. "
                f"Choose either 'similarity' or 'mmr'."
            )

        return results

    def _search(
        self, query_emb: np.ndarray, top_k: int
    ) -> List[Union[Dict[str, Any], Any]]:
        """
        Perform a search using the query embedding and return top_k documents.

        Args:
            query_emb: Query represented as an embedding
            top_k: Number of documents to return

        Returns:
            A list of top_k documents matching the query
        """
        from docarray.index import ElasticDocIndex, WeaviateDocumentIndex

        filter_args = {}
        search_field = self.search_field
        # Each backend spells its filter keyword differently; Weaviate also
        # requires an empty search_field for vector queries.
        if isinstance(self.index, WeaviateDocumentIndex):
            filter_args["where_filter"] = self.filters
            search_field = ""
        elif isinstance(self.index, ElasticDocIndex):
            filter_args["query"] = self.filters
        else:
            filter_args["filter_query"] = self.filters

        if self.filters:
            query = (
                self.index.build_query()  # get empty query object
                .find(
                    query=query_emb, search_field=search_field
                )  # add vector similarity search
                .filter(**filter_args)  # add filter search
                .build(limit=top_k)  # build the query
            )
            # execute the combined query and return the results
            docs = self.index.execute_query(query)
            # Some backends wrap results in an object exposing `.documents`.
            if hasattr(docs, "documents"):
                docs = docs.documents
            docs = docs[:top_k]
        else:
            docs = self.index.find(
                query=query_emb, search_field=search_field, limit=top_k
            ).documents
        return docs

    def _similarity_search(self, query_emb: np.ndarray) -> List[Document]:
        """
        Perform a similarity search.

        Args:
            query_emb: Query represented as an embedding

        Returns:
            A list of documents most similar to the query
        """
        docs = self._search(query_emb=query_emb, top_k=self.top_k)
        results = [self._docarray_to_langchain_doc(doc) for doc in docs]
        return results

    def _mmr_search(self, query_emb: np.ndarray) -> List[Document]:
        """
        Perform a maximal marginal relevance (mmr) search.

        Args:
            query_emb: Query represented as an embedding

        Returns:
            A list of diverse documents related to the query
        """
        # Fetch a larger candidate pool (20), then re-rank down to top_k.
        docs = self._search(query_emb=query_emb, top_k=20)

        mmr_selected = maximal_marginal_relevance(
            query_emb,
            [
                doc[self.search_field]
                if isinstance(doc, dict)
                else getattr(doc, self.search_field)
                for doc in docs
            ],
            k=self.top_k,
        )
        results = [self._docarray_to_langchain_doc(docs[idx]) for idx in mmr_selected]
        return results

    def _docarray_to_langchain_doc(self, doc: Union[Dict[str, Any], Any]) -> Document:
        """
        Convert a DocArray document (which also might be a dict)
        to a langchain document format.

        DocArray document can contain arbitrary fields, so the mapping is done
        in the following way:

        page_content <-> content_field
        metadata <-> all other fields excluding
            tensors and embeddings (so float, int, string)

        Args:
            doc: DocArray document

        Returns:
            Document in langchain format

        Raises:
            ValueError: If the document doesn't contain the content field
        """
        fields = doc.keys() if isinstance(doc, dict) else get_fields(doc)
        if self.content_field not in fields:
            raise ValueError(
                f"Document does not contain the content field - {self.content_field}."
            )
        lc_doc = Document(
            page_content=doc[self.content_field]
            if isinstance(doc, dict)
            else getattr(doc, self.content_field)
        )

        # Only scalar fields survive into metadata; tensors/embeddings and the
        # content field itself are dropped.
        for name in fields:
            value = doc[name] if isinstance(doc, dict) else getattr(doc, name)
            if (
                isinstance(value, (str, int, float, bool))
                and name != self.content_field
            ):
                lc_doc.metadata[name] = value

        return lc_doc
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/retrievers/pubmed.py | from typing import List
from langchain_core.callbacks import CallbackManagerForRetrieverRun
from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever
from langchain_community.utilities.pubmed import PubMedAPIWrapper
class PubMedRetriever(BaseRetriever, PubMedAPIWrapper):
    """`PubMed API` retriever.

    Exposes ``PubMedAPIWrapper.load_docs`` through the standard retriever
    interface; every wrapper argument is accepted unchanged.
    """

    def _get_relevant_documents(
        self, query: str, *, run_manager: CallbackManagerForRetrieverRun
    ) -> List[Document]:
        # All of the work is delegated to the API wrapper's loader.
        documents = self.load_docs(query=query)
        return documents
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/retrievers/remote_retriever.py | from typing import List, Optional
import aiohttp
import requests
from langchain_core.callbacks import (
AsyncCallbackManagerForRetrieverRun,
CallbackManagerForRetrieverRun,
)
from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever
class RemoteLangChainRetriever(BaseRetriever):
    """`LangChain API` retriever."""

    url: str
    """URL of the remote LangChain API."""
    headers: Optional[dict] = None
    """Headers to use for the request."""
    input_key: str = "message"
    """Key to use for the input in the request."""
    response_key: str = "response"
    """Key to use for the response in the request."""
    page_content_key: str = "page_content"
    """Key to use for the page content in the response."""
    metadata_key: str = "metadata"
    """Key to use for the metadata in the response."""

    def _get_relevant_documents(
        self, query: str, *, run_manager: CallbackManagerForRetrieverRun
    ) -> List[Document]:
        """POST the query to the remote endpoint and parse the JSON reply."""
        reply = requests.post(
            self.url, json={self.input_key: query}, headers=self.headers
        )
        payload = reply.json()
        documents = []
        for entry in payload[self.response_key]:
            documents.append(
                Document(
                    page_content=entry[self.page_content_key],
                    metadata=entry[self.metadata_key],
                )
            )
        return documents

    async def _aget_relevant_documents(
        self, query: str, *, run_manager: AsyncCallbackManagerForRetrieverRun
    ) -> List[Document]:
        """Async variant of ``_get_relevant_documents`` using aiohttp."""
        async with aiohttp.ClientSession() as session:
            async with session.request(
                "POST", self.url, headers=self.headers, json={self.input_key: query}
            ) as reply:
                payload = await reply.json()

        documents = []
        for entry in payload[self.response_key]:
            documents.append(
                Document(
                    page_content=entry[self.page_content_key],
                    metadata=entry[self.metadata_key],
                )
            )
        return documents
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/retrievers/zep.py | from __future__ import annotations
from enum import Enum
from typing import TYPE_CHECKING, Any, Dict, List, Optional
from langchain_core.callbacks import (
AsyncCallbackManagerForRetrieverRun,
CallbackManagerForRetrieverRun,
)
from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever
from pydantic import model_validator
if TYPE_CHECKING:
from zep_python.memory import MemorySearchResult
class SearchScope(str, Enum):
    """Selects which Zep records are searched: messages or summaries."""

    # Search over individual chat-history messages.
    messages = "messages"
    # Search over chat-history summaries.
    summary = "summary"
class SearchType(str, Enum):
    """The kinds of search Zep can perform."""

    # Plain semantic-similarity search.
    similarity = "similarity"
    # Maximal-marginal-relevance re-ranking of similarity results.
    mmr = "mmr"
class ZepRetriever(BaseRetriever):
    """`Zep` MemoryStore Retriever.

    Search your user's long-term chat history with Zep.

    Zep offers both simple semantic search and Maximal Marginal Relevance (MMR)
    reranking of search results.

    Note: You will need to provide the user's `session_id` to use this retriever.

    Args:
        url: URL of your Zep server (required)
        api_key: Your Zep API key (optional)
        session_id: Identifies your user or a user's session (required)
        top_k: Number of documents to return (default: 3, optional)
        search_type: Type of search to perform (similarity / mmr) (default: similarity,
            optional)
        mmr_lambda: Lambda value for MMR search. Defaults to 0.5 (optional)

    Zep - Fast, scalable building blocks for LLM Apps
    =========
    Zep is an open source platform for productionizing LLM apps. Go from a prototype
    built in LangChain or LlamaIndex, or a custom app, to production in minutes without
    rewriting code.

    For server installation instructions, see:
    https://docs.getzep.com/deployment/quickstart/
    """

    zep_client: Optional[Any] = None
    """Zep client."""
    url: str
    """URL of your Zep server."""
    api_key: Optional[str] = None
    """Your Zep API key."""
    session_id: str
    """Zep session ID."""
    top_k: Optional[int]
    """Number of items to return."""
    # NOTE(review): `top_k` has no default, so pydantic treats it as required
    # even though the class docstring calls it optional — confirm intent.
    search_scope: SearchScope = SearchScope.messages
    """Which documents to search. Messages or Summaries?"""
    search_type: SearchType = SearchType.similarity
    """Type of search to perform (similarity / mmr)"""
    mmr_lambda: Optional[float] = None
    """Lambda value for MMR search."""

    @model_validator(mode="before")
    @classmethod
    def create_client(cls, values: dict) -> Any:
        """Instantiate a ``ZepClient`` from ``url``/``api_key`` unless one
        was passed in explicitly via ``zep_client``.

        Raises:
            ImportError: If the ``zep-python`` package is not installed.
        """
        try:
            from zep_python import ZepClient
        except ImportError:
            raise ImportError(
                "Could not import zep-python package. "
                "Please install it with `pip install zep-python`."
            )
        # Build the fallback client lazily: the previous
        # `values.get("zep_client", ZepClient(...))` form constructed (and
        # discarded) a client even when one was supplied, and raised KeyError
        # on a missing "url" before field validation could report it.
        if values.get("zep_client") is None:
            values["zep_client"] = ZepClient(
                base_url=values["url"], api_key=values.get("api_key")
            )
        return values

    def _messages_search_result_to_doc(
        self, results: List[MemorySearchResult]
    ) -> List[Document]:
        """Convert message-scope hits to documents.

        The message's "content" becomes the page content; the remaining
        message fields plus the similarity score become metadata.
        """
        return [
            Document(
                page_content=r.message.pop("content"),
                metadata={"score": r.dist, **r.message},
            )
            for r in results
            if r.message
        ]

    def _summary_search_result_to_doc(
        self, results: List[MemorySearchResult]
    ) -> List[Document]:
        """Convert summary-scope hits to documents with summary metadata."""
        return [
            Document(
                page_content=r.summary.content,
                metadata={
                    "score": r.dist,
                    "uuid": r.summary.uuid,
                    "created_at": r.summary.created_at,
                    "token_count": r.summary.token_count,
                },
            )
            for r in results
            if r.summary
        ]

    def _get_relevant_documents(
        self,
        query: str,
        *,
        run_manager: CallbackManagerForRetrieverRun,
        metadata: Optional[Dict[str, Any]] = None,
    ) -> List[Document]:
        """Search the session's Zep memory for ``query``.

        Args:
            query: Text to search for.
            run_manager: Callback manager for this run (unused).
            metadata: Optional Zep metadata filter.

        Raises:
            RuntimeError: If the Zep client was never initialized.
        """
        from zep_python.memory import MemorySearchPayload

        if not self.zep_client:
            raise RuntimeError("Zep client not initialized.")

        payload = MemorySearchPayload(
            text=query,
            metadata=metadata,
            search_scope=self.search_scope,
            search_type=self.search_type,
            mmr_lambda=self.mmr_lambda,
        )

        results: List[MemorySearchResult] = self.zep_client.memory.search_memory(
            self.session_id, payload, limit=self.top_k
        )

        if self.search_scope == SearchScope.summary:
            return self._summary_search_result_to_doc(results)

        return self._messages_search_result_to_doc(results)

    async def _aget_relevant_documents(
        self,
        query: str,
        *,
        run_manager: AsyncCallbackManagerForRetrieverRun,
        metadata: Optional[Dict[str, Any]] = None,
    ) -> List[Document]:
        """Async counterpart of ``_get_relevant_documents``."""
        from zep_python.memory import MemorySearchPayload

        if not self.zep_client:
            raise RuntimeError("Zep client not initialized.")

        payload = MemorySearchPayload(
            text=query,
            metadata=metadata,
            search_scope=self.search_scope,
            search_type=self.search_type,
            mmr_lambda=self.mmr_lambda,
        )

        results: List[MemorySearchResult] = await self.zep_client.memory.asearch_memory(
            self.session_id, payload, limit=self.top_k
        )

        if self.search_scope == SearchScope.summary:
            return self._summary_search_result_to_doc(results)

        return self._messages_search_result_to_doc(results)
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/retrievers/web_research.py | import logging
import re
from typing import Any, List, Optional
from langchain.chains import LLMChain
from langchain.chains.prompt_selector import ConditionalPromptSelector
from langchain_core.callbacks import (
AsyncCallbackManagerForRetrieverRun,
CallbackManagerForRetrieverRun,
)
from langchain_core.documents import Document
from langchain_core.language_models import BaseLLM
from langchain_core.output_parsers import BaseOutputParser
from langchain_core.prompts import BasePromptTemplate, PromptTemplate
from langchain_core.retrievers import BaseRetriever
from langchain_core.vectorstores import VectorStore
from langchain_text_splitters import RecursiveCharacterTextSplitter, TextSplitter
from pydantic import BaseModel, Field
from langchain_community.document_loaders import AsyncHtmlLoader
from langchain_community.document_transformers import Html2TextTransformer
from langchain_community.llms import LlamaCpp
from langchain_community.utilities import GoogleSearchAPIWrapper
logger = logging.getLogger(__name__)
class SearchQueries(BaseModel):
    """Search queries to research for the user's goal."""

    # Populated by the LLM; one entry per generated Google query.
    queries: List[str] = Field(
        ..., description="List of search queries to look up on Google"
    )
# Llama-2-chat-formatted prompt: wraps the instruction in the <<SYS>>/[INST]
# tags that llama chat models expect.
DEFAULT_LLAMA_SEARCH_PROMPT = PromptTemplate(
    input_variables=["question"],
    template="""<<SYS>> \n You are an assistant tasked with improving Google search \
results. \n <</SYS>> \n\n [INST] Generate THREE Google search queries that \
are similar to this question. The output should be a numbered list of questions \
and each should have a question mark at the end: \n\n {question} [/INST]""",
)

# Plain prompt used for every other LLM; same instruction without llama tags.
DEFAULT_SEARCH_PROMPT = PromptTemplate(
    input_variables=["question"],
    template="""You are an assistant tasked with improving Google search \
results. Generate THREE Google search queries that are similar to \
this question. The output should be a numbered list of questions and each \
should have a question mark at the end: {question}""",
)
class QuestionListOutputParser(BaseOutputParser[List[str]]):
    """Output parser for a list of numbered questions."""

    def parse(self, text: str) -> List[str]:
        """Return every numbered line ("1. ...") found in the LLM output.

        Each match keeps its numbering and trailing newline; callers are
        expected to strip the leading digits before use.
        """
        pattern = re.compile(r"\d+\..*?(?:\n|$)")
        return pattern.findall(text)
class WebResearchRetriever(BaseRetriever):
    """`Google Search API` retriever.

    Generates several Google search queries from the user's question with an
    LLM, crawls the resulting URLs, indexes their text into ``vectorstore``,
    and returns the chunks most similar to each generated question.
    """

    # Inputs
    vectorstore: VectorStore = Field(
        ..., description="Vector store for storing web pages"
    )
    llm_chain: LLMChain
    search: GoogleSearchAPIWrapper = Field(..., description="Google Search API Wrapper")
    num_search_results: int = Field(1, description="Number of pages per Google search")
    text_splitter: TextSplitter = Field(
        RecursiveCharacterTextSplitter(chunk_size=1500, chunk_overlap=50),
        description="Text splitter for splitting web pages into chunks",
    )
    url_database: List[str] = Field(
        default_factory=list, description="List of processed URLs"
    )
    trust_env: bool = Field(
        False,
        description="Whether to use the http_proxy/https_proxy env variables or "
        "check .netrc for proxy configuration",
    )

    allow_dangerous_requests: bool = False
    """A flag to force users to acknowledge the risks of SSRF attacks when using
    this retriever.

    Users should set this flag to `True` if they have taken the necessary precautions
    to prevent SSRF attacks when using this retriever.
    For example, users can run the requests through a properly configured
    proxy and prevent the crawler from accidentally crawling internal resources.
    """

    def __init__(self, **kwargs: Any) -> None:
        """Initialize the retriever.

        Raises:
            ValueError: If ``allow_dangerous_requests`` is not explicitly
                ``True``, forcing acknowledgement of the SSRF risk.
        """
        allow_dangerous_requests = kwargs.get("allow_dangerous_requests", False)
        if not allow_dangerous_requests:
            # Fixed message: the original concatenation produced "leadingto"
            # and "resources.If've" (missing spaces and a typo).
            raise ValueError(
                "WebResearchRetriever crawls URLs surfaced through "
                "the provided search engine. It is possible that some of those URLs "
                "will end up pointing to machines residing on an internal network, "
                "leading to an SSRF (Server-Side Request Forgery) attack. "
                "To protect yourself against that risk, you can run the requests "
                "through a proxy and prevent the crawler from accidentally crawling "
                "internal resources. "
                "If you've taken the necessary precautions, you can set "
                "`allow_dangerous_requests` to `True`."
            )
        super().__init__(**kwargs)

    @classmethod
    def from_llm(
        cls,
        vectorstore: VectorStore,
        llm: BaseLLM,
        search: GoogleSearchAPIWrapper,
        prompt: Optional[BasePromptTemplate] = None,
        num_search_results: int = 1,
        text_splitter: RecursiveCharacterTextSplitter = RecursiveCharacterTextSplitter(
            chunk_size=1500, chunk_overlap=150
        ),
        trust_env: bool = False,
        allow_dangerous_requests: bool = False,
    ) -> "WebResearchRetriever":
        """Initialize from llm using default template.

        Args:
            vectorstore: Vector store for storing web pages
            llm: llm for search question generation
            search: GoogleSearchAPIWrapper
            prompt: prompt to generating search questions
            num_search_results: Number of pages per Google search
            text_splitter: Text splitter for splitting web pages into chunks.
                NOTE(review): the default is a single instance created at
                import time and shared across calls — presumably harmless if
                the splitter is stateless; confirm before relying on it.
            trust_env: Whether to use the http_proxy/https_proxy env variables
                or check .netrc for proxy configuration
            allow_dangerous_requests: A flag to force users to acknowledge
                the risks of SSRF attacks when using this retriever

        Returns:
            WebResearchRetriever
        """
        if not prompt:
            # LlamaCpp models get the [INST]-formatted prompt; everything
            # else gets the plain instruction prompt.
            QUESTION_PROMPT_SELECTOR = ConditionalPromptSelector(
                default_prompt=DEFAULT_SEARCH_PROMPT,
                conditionals=[
                    (lambda llm: isinstance(llm, LlamaCpp), DEFAULT_LLAMA_SEARCH_PROMPT)
                ],
            )
            prompt = QUESTION_PROMPT_SELECTOR.get_prompt(llm)

        # Use chat model prompt
        llm_chain = LLMChain(
            llm=llm,
            prompt=prompt,
            output_parser=QuestionListOutputParser(),
        )

        return cls(
            vectorstore=vectorstore,
            llm_chain=llm_chain,
            search=search,
            num_search_results=num_search_results,
            text_splitter=text_splitter,
            trust_env=trust_env,
            allow_dangerous_requests=allow_dangerous_requests,
        )

    def clean_search_query(self, query: str) -> str:
        """Strip a leading "1. "-style number (and wrapping quotes) from a
        generated query, since some search tools fail on such prefixes."""
        # Some search tools (e.g., Google) will
        # fail to return results if query has a
        # leading digit: 1. "LangCh..."
        # Check if the first character is a digit
        if query[0].isdigit():
            # Find the position of the first quote
            first_quote_pos = query.find('"')
            if first_quote_pos != -1:
                # Extract the part of the string after the quote
                query = query[first_quote_pos + 1 :]
                # Remove the trailing quote if present
                if query.endswith('"'):
                    query = query[:-1]
        return query.strip()

    def search_tool(self, query: str, num_search_results: int = 1) -> List[dict]:
        """Returns num_search_results pages per Google search."""
        query_clean = self.clean_search_query(query)
        result = self.search.results(query_clean, num_search_results)
        return result

    def _get_relevant_documents(
        self,
        query: str,
        *,
        run_manager: CallbackManagerForRetrieverRun,
    ) -> List[Document]:
        """Search Google for documents related to the query input.

        Args:
            query: user query

        Returns:
            Relevant documents from all various urls.
        """
        # Get search questions
        logger.info("Generating questions for Google Search ...")
        result = self.llm_chain({"question": query})
        logger.info(f"Questions for Google Search (raw): {result}")
        questions = result["text"]
        logger.info(f"Questions for Google Search: {questions}")

        # Get urls
        logger.info("Searching for relevant urls...")
        urls_to_look = []
        # Note: `query` below shadows the original parameter; the original
        # user query is no longer needed past this point.
        for query in questions:
            # Google search
            search_results = self.search_tool(query, self.num_search_results)
            logger.info("Searching for relevant urls...")
            logger.info(f"Search results: {search_results}")
            for res in search_results:
                if res.get("link", None):
                    urls_to_look.append(res["link"])

        # Relevant urls
        urls = set(urls_to_look)

        # Check for any new urls that we have not processed
        new_urls = list(urls.difference(self.url_database))

        logger.info(f"New URLs to load: {new_urls}")
        # Load, split, and add new urls to vectorstore
        if new_urls:
            loader = AsyncHtmlLoader(
                new_urls, ignore_load_errors=True, trust_env=self.trust_env
            )
            html2text = Html2TextTransformer()
            logger.info("Indexing new urls...")
            docs = loader.load()
            docs = list(html2text.transform_documents(docs))
            docs = self.text_splitter.split_documents(docs)
            self.vectorstore.add_documents(docs)
            self.url_database.extend(new_urls)

        # Search for relevant splits
        # TODO: make this async
        logger.info("Grabbing most relevant splits from urls...")
        docs = []
        for query in questions:
            docs.extend(self.vectorstore.similarity_search(query))

        # Get unique docs, keyed on content plus sorted metadata items.
        unique_documents_dict = {
            (doc.page_content, tuple(sorted(doc.metadata.items()))): doc for doc in docs
        }
        unique_documents = list(unique_documents_dict.values())
        return unique_documents

    async def _aget_relevant_documents(
        self,
        query: str,
        *,
        run_manager: AsyncCallbackManagerForRetrieverRun,
    ) -> List[Document]:
        """Async retrieval is not implemented for this retriever."""
        raise NotImplementedError
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/retrievers/google_cloud_documentai_warehouse.py | """Retriever wrapper for Google Cloud Document AI Warehouse."""
from typing import TYPE_CHECKING, Any, Dict, List, Optional
from langchain_core._api.deprecation import deprecated
from langchain_core.callbacks import CallbackManagerForRetrieverRun
from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever
from langchain_core.utils import get_from_dict_or_env, pre_init
from langchain_community.utilities.vertexai import get_client_info
if TYPE_CHECKING:
from google.cloud.contentwarehouse_v1 import (
DocumentServiceClient,
RequestMetadata,
SearchDocumentsRequest,
)
from google.cloud.contentwarehouse_v1.services.document_service.pagers import (
SearchDocumentsPager,
)
@deprecated(
    since="0.0.32",
    removal="1.0",
    alternative_import="langchain_google_community.DocumentAIWarehouseRetriever",
)
class GoogleDocumentAIWarehouseRetriever(BaseRetriever):
    """A retriever based on Document AI Warehouse.

    Documents should be created and documents should be uploaded
    in a separate flow, and this retriever uses only Document AI
    schema_id provided to search for relevant documents.

    More info: https://cloud.google.com/document-ai-warehouse.
    """

    location: str = "us"
    """Google Cloud location where Document AI Warehouse is placed."""
    project_number: str
    """Google Cloud project number, should contain digits only."""
    schema_id: Optional[str] = None
    """Document AI Warehouse schema to query against.
    If nothing is provided, all documents in the project will be searched."""
    qa_size_limit: int = 5
    """The limit on the number of documents returned."""
    client: "DocumentServiceClient" = None  #: :meta private:

    @pre_init
    def validate_environment(cls, values: Dict) -> Dict:
        """Validates the environment.

        Checks that the content warehouse client library is importable,
        resolves ``project_number`` from kwargs or the PROJECT_NUMBER env
        var, and instantiates the service client.
        """
        try:
            from google.cloud.contentwarehouse_v1 import DocumentServiceClient
        except ImportError as exc:
            raise ImportError(
                "google.cloud.contentwarehouse is not installed."
                "Please install it with pip install google-cloud-contentwarehouse"
            ) from exc

        values["project_number"] = get_from_dict_or_env(
            values, "project_number", "PROJECT_NUMBER"
        )
        values["client"] = DocumentServiceClient(
            client_info=get_client_info(module="document-ai-warehouse")
        )
        return values

    def _prepare_request_metadata(self, user_ldap: str) -> "RequestMetadata":
        """Build per-request metadata identifying the acting user."""
        from google.cloud.contentwarehouse_v1 import RequestMetadata, UserInfo

        user_info = UserInfo(id=f"user:{user_ldap}")
        return RequestMetadata(user_info=user_info)

    def _get_relevant_documents(
        self, query: str, *, run_manager: CallbackManagerForRetrieverRun, **kwargs: Any
    ) -> List[Document]:
        """Search Document AI Warehouse for documents matching ``query``.

        Requires a ``user_ldap`` keyword argument identifying the caller.
        """
        request = self._prepare_search_request(query, **kwargs)
        response = self.client.search_documents(request=request)
        return self._parse_search_response(response=response)

    def _prepare_search_request(
        self, query: str, **kwargs: Any
    ) -> "SearchDocumentsRequest":
        """Assemble the SearchDocumentsRequest for a natural-language query.

        Raises:
            ValueError: If ``user_ldap`` is missing from kwargs.
        """
        from google.cloud.contentwarehouse_v1 import (
            DocumentQuery,
            SearchDocumentsRequest,
        )

        try:
            user_ldap = kwargs["user_ldap"]
        except KeyError:
            raise ValueError("Argument user_ldap should be provided!")

        request_metadata = self._prepare_request_metadata(user_ldap=user_ldap)
        schemas = []
        if self.schema_id:
            # Restrict the search to the configured schema; otherwise every
            # document in the project is searched.
            schemas.append(
                self.client.document_schema_path(
                    project=self.project_number,
                    location=self.location,
                    document_schema=self.schema_id,
                )
            )
        return SearchDocumentsRequest(
            parent=self.client.common_location_path(self.project_number, self.location),
            request_metadata=request_metadata,
            document_query=DocumentQuery(
                query=query, is_nl_query=True, document_schema_names=schemas
            ),
            qa_size_limit=self.qa_size_limit,
        )

    def _parse_search_response(
        self, response: "SearchDocumentsPager"
    ) -> List[Document]:
        """Convert matching documents into LangChain ``Document`` objects."""
        documents = []
        for doc in response.matching_documents:
            metadata = {
                "title": doc.document.title,
                "source": doc.document.raw_document_path,
            }
            documents.append(
                Document(page_content=doc.search_text_snippet, metadata=metadata)
            )
        return documents
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/retrievers/outline.py | from typing import List
from langchain_core.callbacks import CallbackManagerForRetrieverRun
from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever
from langchain_community.utilities.outline import OutlineAPIWrapper
class OutlineRetriever(BaseRetriever, OutlineAPIWrapper):
    """Retriever for Outline API.
    It wraps run() to get_relevant_documents().
    It uses all OutlineAPIWrapper arguments without any change.
    """
    def _get_relevant_documents(
        self, query: str, *, run_manager: CallbackManagerForRetrieverRun
    ) -> List[Document]:
        """Return documents relevant to ``query``.

        Delegates entirely to the inherited ``OutlineAPIWrapper.run``;
        ``run_manager`` is accepted for the BaseRetriever contract but unused.
        """
        return self.run(query=query)
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/retrievers/svm.py | from __future__ import annotations
import concurrent.futures
from typing import Any, Iterable, List, Optional
import numpy as np
from langchain_core.callbacks import CallbackManagerForRetrieverRun
from langchain_core.documents import Document
from langchain_core.embeddings import Embeddings
from langchain_core.retrievers import BaseRetriever
from pydantic import ConfigDict
def create_index(contexts: List[str], embeddings: Embeddings) -> np.ndarray:
    """Embed every context and stack the vectors into one array.

    Args:
        contexts: List of contexts to embed.
        embeddings: Embeddings model to use.

    Returns:
        A 2-D numpy array with one embedding row per context.
    """
    # Fan the embedding calls out over a thread pool; ``Executor.map``
    # preserves input order, so row i corresponds to contexts[i].
    with concurrent.futures.ThreadPoolExecutor() as pool:
        vectors = pool.map(embeddings.embed_query, contexts)
        return np.array(list(vectors))
class SVMRetriever(BaseRetriever):
    """`SVM` retriever.
    Largely based on
    https://github.com/karpathy/randomfun/blob/master/knn_vs_svm.ipynb
    """
    embeddings: Embeddings
    """Embeddings model to use."""
    index: Any = None
    """Index of embeddings."""
    texts: List[str]
    """List of texts to index."""
    metadatas: Optional[List[dict]] = None
    """List of metadatas corresponding with each text."""
    k: int = 4
    """Number of results to return."""
    relevancy_threshold: Optional[float] = None
    """Threshold for relevancy."""
    model_config = ConfigDict(
        arbitrary_types_allowed=True,
    )
    @classmethod
    def from_texts(
        cls,
        texts: List[str],
        embeddings: Embeddings,
        metadatas: Optional[List[dict]] = None,
        **kwargs: Any,
    ) -> SVMRetriever:
        """Build a retriever by embedding ``texts`` into an index."""
        index = create_index(texts, embeddings)
        return cls(
            embeddings=embeddings,
            index=index,
            texts=texts,
            metadatas=metadatas,
            **kwargs,
        )
    @classmethod
    def from_documents(
        cls,
        documents: Iterable[Document],
        embeddings: Embeddings,
        **kwargs: Any,
    ) -> SVMRetriever:
        """Build a retriever from Documents (content/metadata pairs)."""
        texts, metadatas = zip(*((d.page_content, d.metadata) for d in documents))
        return cls.from_texts(
            texts=texts, embeddings=embeddings, metadatas=metadatas, **kwargs
        )
    def _get_relevant_documents(
        self, query: str, *, run_manager: CallbackManagerForRetrieverRun
    ) -> List[Document]:
        """Rank the indexed texts against ``query`` with a linear SVM."""
        try:
            from sklearn import svm
        except ImportError:
            raise ImportError(
                "Could not import scikit-learn, please install with `pip install "
                "scikit-learn`."
            )
        # Train a linear SVM where the query (row 0) is the only positive
        # example; the decision function then acts as a similarity score.
        query_embeds = np.array(self.embeddings.embed_query(query))
        x = np.concatenate([query_embeds[None, ...], self.index])
        y = np.zeros(x.shape[0])
        y[0] = 1
        clf = svm.LinearSVC(
            class_weight="balanced", verbose=False, max_iter=10000, tol=1e-6, C=0.1
        )
        clf.fit(x, y)
        similarities = clf.decision_function(x)
        sorted_ix = np.argsort(-similarities)
        # svm.LinearSVC in scikit-learn is non-deterministic.
        # if a text is the same as a query, there is no guarantee
        # the query will be in the first index.
        # this performs a simple swap, this works because anything
        # left of the 0 should be equivalent.
        zero_index = np.where(sorted_ix == 0)[0][0]
        if zero_index != 0:
            sorted_ix[0], sorted_ix[zero_index] = sorted_ix[zero_index], sorted_ix[0]
        # Min-max normalize the scores so relevancy_threshold is comparable
        # across queries; the epsilon guards against division by zero.
        denominator = np.max(similarities) - np.min(similarities) + 1e-6
        normalized_similarities = (similarities - np.min(similarities)) / denominator
        top_k_results = []
        # Skip row 0 (the query itself); keep up to k documents that pass
        # the optional threshold.
        for row in sorted_ix[1 : self.k + 1]:
            if (
                self.relevancy_threshold is None
                or normalized_similarities[row] >= self.relevancy_threshold
            ):
                # row - 1 re-aligns with self.texts, since the query occupies row 0.
                metadata = self.metadatas[row - 1] if self.metadatas else {}
                doc = Document(page_content=self.texts[row - 1], metadata=metadata)
                top_k_results.append(doc)
        return top_k_results
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/retrievers/qdrant_sparse_vector_retriever.py | import uuid
from itertools import islice
from typing import (
Any,
Callable,
Dict,
Generator,
Iterable,
List,
Optional,
Sequence,
Tuple,
cast,
)
from langchain_core._api.deprecation import deprecated
from langchain_core.callbacks import CallbackManagerForRetrieverRun
from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever
from langchain_core.utils import pre_init
from pydantic import ConfigDict
from langchain_community.vectorstores.qdrant import Qdrant, QdrantException
@deprecated(
    since="0.2.16",
    alternative=(
        "Qdrant vector store now supports sparse retrievals natively. "
        "Use langchain_qdrant.QdrantVectorStore#as_retriever() instead. "
        "Reference: "
        "https://python.langchain.com/docs/integrations/vectorstores/qdrant/#sparse-vector-search"
    ),
    removal="0.5.0",
)
class QdrantSparseVectorRetriever(BaseRetriever):
    """Qdrant sparse vector retriever.

    Encodes queries/documents with a user-supplied sparse encoder and
    searches/upserts against a named sparse vector of a Qdrant collection.
    """

    client: Any = None
    """'qdrant_client' instance to use."""
    collection_name: str
    """Qdrant collection name."""
    sparse_vector_name: str
    """Name of the sparse vector to use."""
    sparse_encoder: Callable[[str], Tuple[List[int], List[float]]]
    """Sparse encoder function to use."""
    k: int = 4
    """Number of documents to return per query. Defaults to 4."""
    filter: Optional[Any] = None
    """Qdrant qdrant_client.models.Filter to use for queries. Defaults to None."""
    content_payload_key: str = "content"
    """Payload field containing the document content. Defaults to 'content'"""
    metadata_payload_key: str = "metadata"
    """Payload field containing the document metadata. Defaults to 'metadata'."""
    search_options: Dict[str, Any] = {}
    """Additional search options to pass to qdrant_client.QdrantClient.search()."""
    model_config = ConfigDict(
        arbitrary_types_allowed=True,
        extra="forbid",
    )

    @pre_init
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that 'qdrant_client' python package exists in environment."""
        try:
            from grpc import RpcError
            from qdrant_client import QdrantClient, models
            from qdrant_client.http.exceptions import UnexpectedResponse
        except ImportError:
            raise ImportError(
                "Could not import qdrant-client python package. "
                "Please install it with `pip install qdrant-client`."
            )
        client = values["client"]
        if not isinstance(client, QdrantClient):
            raise ValueError(
                f"client should be an instance of qdrant_client.QdrantClient, "
                f"got {type(client)}"
            )
        filter = values["filter"]
        if filter is not None and not isinstance(filter, models.Filter):
            raise ValueError(
                f"filter should be an instance of qdrant_client.models.Filter, "
                f"got {type(filter)}"
            )
        client = cast(QdrantClient, client)
        collection_name = values["collection_name"]
        sparse_vector_name = values["sparse_vector_name"]
        # Verify the target collection exists and declares the requested
        # sparse vector before accepting the configuration.
        try:
            collection_info = client.get_collection(collection_name)
            sparse_vectors_config = collection_info.config.params.sparse_vectors
            if sparse_vector_name not in sparse_vectors_config:
                raise QdrantException(
                    f"Existing Qdrant collection {collection_name} does not "
                    f"contain sparse vector named {sparse_vector_name}."
                    f"Did you mean one of {', '.join(sparse_vectors_config.keys())}?"
                )
        except (UnexpectedResponse, RpcError, ValueError):
            raise QdrantException(
                f"Qdrant collection {collection_name} does not exist."
            )
        return values

    def _get_relevant_documents(
        self, query: str, *, run_manager: CallbackManagerForRetrieverRun
    ) -> List[Document]:
        """Encode ``query`` sparsely and search the Qdrant collection."""
        from qdrant_client import QdrantClient, models

        client = cast(QdrantClient, self.client)
        query_indices, query_values = self.sparse_encoder(query)
        results = client.search(
            self.collection_name,
            query_filter=self.filter,
            query_vector=models.NamedSparseVector(
                name=self.sparse_vector_name,
                vector=models.SparseVector(
                    indices=query_indices,
                    values=query_values,
                ),
            ),
            limit=self.k,
            with_vectors=False,
            **self.search_options,
        )
        return [
            Qdrant._document_from_scored_point(
                point,
                self.collection_name,
                self.content_payload_key,
                self.metadata_payload_key,
            )
            for point in results
        ]

    def add_documents(self, documents: List[Document], **kwargs: Any) -> List[str]:
        """Run more documents through the embeddings and add to the vectorstore.

        Args:
            documents (List[Document]): Documents to add to the vectorstore.

        Returns:
            List[str]: List of IDs of the added texts.
        """
        texts = [doc.page_content for doc in documents]
        metadatas = [doc.metadata for doc in documents]
        return self.add_texts(texts, metadatas, **kwargs)

    def add_texts(
        self,
        texts: Iterable[str],
        metadatas: Optional[List[dict]] = None,
        ids: Optional[Sequence[str]] = None,
        batch_size: int = 64,
        **kwargs: Any,
    ) -> List[str]:
        """Encode ``texts`` with the sparse encoder and upsert them in batches.

        Args:
            texts: Texts to index (any iterable, including a generator).
            metadatas: Optional metadata dicts aligned with ``texts``.
            ids: Optional point ids; random UUID hex ids are generated
                when omitted.
            batch_size: Number of points per upsert request.

        Returns:
            List[str]: Ids of the upserted points.
        """
        from qdrant_client import QdrantClient

        added_ids = []
        client = cast(QdrantClient, self.client)
        for batch_ids, points in self._generate_rest_batches(
            texts, metadatas, ids, batch_size
        ):
            client.upsert(self.collection_name, points=points, **kwargs)
            added_ids.extend(batch_ids)
        return added_ids

    def _generate_rest_batches(
        self,
        texts: Iterable[str],
        metadatas: Optional[List[dict]] = None,
        ids: Optional[Sequence[str]] = None,
        batch_size: int = 64,
    ) -> Generator[Tuple[List[str], List[Any]], None, None]:
        """Yield ``(batch_ids, PointStruct list)`` pairs ready for upsert."""
        from qdrant_client import models as rest

        # Materialize ``texts`` once. The previous implementation re-iterated
        # ``iter(texts)`` to build fallback ids, which exhausted one-shot
        # generators before batching and silently produced zero batches.
        texts_list = list(texts)
        texts_iterator = iter(texts_list)
        metadatas_iterator = iter(metadatas or [])
        ids_iterator = iter(ids or [uuid.uuid4().hex for _ in texts_list])
        while batch_texts := list(islice(texts_iterator, batch_size)):
            # Take the corresponding metadata and id for each text in a batch
            batch_metadatas = list(islice(metadatas_iterator, batch_size)) or None
            batch_ids = list(islice(ids_iterator, batch_size))
            # Generate the sparse embeddings for all the texts in a batch
            batch_embeddings: List[Tuple[List[int], List[float]]] = [
                self.sparse_encoder(text) for text in batch_texts
            ]
            points = [
                rest.PointStruct(
                    id=point_id,
                    vector={
                        self.sparse_vector_name: rest.SparseVector(
                            indices=sparse_vector[0],
                            values=sparse_vector[1],
                        )
                    },
                    payload=payload,
                )
                for point_id, sparse_vector, payload in zip(
                    batch_ids,
                    batch_embeddings,
                    Qdrant._build_payloads(
                        batch_texts,
                        batch_metadatas,
                        self.content_payload_key,
                        self.metadata_payload_key,
                    ),
                )
            ]
            yield batch_ids, points
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/retrievers/chaindesk.py | from typing import Any, List, Optional
import aiohttp
import requests
from langchain_core.callbacks import (
AsyncCallbackManagerForRetrieverRun,
CallbackManagerForRetrieverRun,
)
from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever
class ChaindeskRetriever(BaseRetriever):
    """`Chaindesk API` retriever.

    Posts the query to a Chaindesk datastore endpoint and converts each
    returned result into a ``Document`` with ``source``/``score`` metadata.
    """

    datastore_url: str
    """URL of the Chaindesk datastore to query."""
    top_k: Optional[int]
    """Maximum number of results to request (``topK``); server default if None."""
    api_key: Optional[str]
    """Optional API key sent as a bearer token."""

    def __init__(
        self,
        datastore_url: str,
        top_k: Optional[int] = None,
        api_key: Optional[str] = None,
    ):
        """Initialize the retriever.

        Args:
            datastore_url: URL of the Chaindesk datastore.
            top_k: Optional maximum number of results.
            api_key: Optional API key used as a bearer token.
        """
        # BaseRetriever is a pydantic model: fields must be set through the
        # model initializer. The previous direct ``self.attr = ...``
        # assignments bypassed pydantic validation and fail on pydantic v2.
        super().__init__(
            datastore_url=datastore_url,
            top_k=top_k,
            api_key=api_key,
        )

    def _get_relevant_documents(
        self,
        query: str,
        *,
        run_manager: CallbackManagerForRetrieverRun,
        **kwargs: Any,
    ) -> List[Document]:
        """Synchronously query the datastore and parse the results."""
        response = requests.post(
            self.datastore_url,
            json={
                "query": query,
                # Only send topK when explicitly configured.
                **({"topK": self.top_k} if self.top_k is not None else {}),
            },
            headers={
                "Content-Type": "application/json",
                **(
                    {"Authorization": f"Bearer {self.api_key}"}
                    if self.api_key is not None
                    else {}
                ),
            },
        )
        data = response.json()
        return [
            Document(
                page_content=r["text"],
                metadata={"source": r["source"], "score": r["score"]},
            )
            for r in data["results"]
        ]

    async def _aget_relevant_documents(
        self,
        query: str,
        *,
        run_manager: AsyncCallbackManagerForRetrieverRun,
        **kwargs: Any,
    ) -> List[Document]:
        """Asynchronously query the datastore and parse the results."""
        async with aiohttp.ClientSession() as session:
            async with session.request(
                "POST",
                self.datastore_url,
                json={
                    "query": query,
                    **({"topK": self.top_k} if self.top_k is not None else {}),
                },
                headers={
                    "Content-Type": "application/json",
                    **(
                        {"Authorization": f"Bearer {self.api_key}"}
                        if self.api_key is not None
                        else {}
                    ),
                },
            ) as response:
                data = await response.json()
        return [
            Document(
                page_content=r["text"],
                metadata={"source": r["source"], "score": r["score"]},
            )
            for r in data["results"]
        ]
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/retrievers/chatgpt_plugin_retriever.py | from __future__ import annotations
from typing import List, Optional
import aiohttp
import requests
from langchain_core.callbacks import (
AsyncCallbackManagerForRetrieverRun,
CallbackManagerForRetrieverRun,
)
from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever
from pydantic import ConfigDict
class ChatGPTPluginRetriever(BaseRetriever):
    """`ChatGPT plugin` retriever."""
    url: str
    """URL of the ChatGPT plugin."""
    bearer_token: str
    """Bearer token for the ChatGPT plugin."""
    top_k: int = 3
    """Number of documents to return."""
    filter: Optional[dict] = None
    """Filter to apply to the results."""
    aiosession: Optional[aiohttp.ClientSession] = None
    """Aiohttp session to use for requests."""
    model_config = ConfigDict(
        arbitrary_types_allowed=True,
    )
    def _get_relevant_documents(
        self, query: str, *, run_manager: CallbackManagerForRetrieverRun
    ) -> List[Document]:
        """Synchronously POST the query to the plugin's /query endpoint."""
        url, json, headers = self._create_request(query)
        response = requests.post(url, json=json, headers=headers)
        # A single query is sent, so only the first result set is relevant.
        results = response.json()["results"][0]["results"]
        docs = []
        for d in results:
            content = d.pop("text")
            # If no "metadata" key exists, fall back to the remaining fields of d.
            metadata = d.pop("metadata", d)
            if metadata.get("source_id"):
                metadata["source"] = metadata.pop("source_id")
            docs.append(Document(page_content=content, metadata=metadata))
        return docs
    async def _aget_relevant_documents(
        self, query: str, *, run_manager: AsyncCallbackManagerForRetrieverRun
    ) -> List[Document]:
        """Asynchronously POST the query, reusing ``aiosession`` when provided."""
        url, json, headers = self._create_request(query)
        if not self.aiosession:
            async with aiohttp.ClientSession() as session:
                async with session.post(url, headers=headers, json=json) as response:
                    res = await response.json()
        else:
            async with self.aiosession.post(
                url, headers=headers, json=json
            ) as response:
                res = await response.json()
        results = res["results"][0]["results"]
        docs = []
        for d in results:
            content = d.pop("text")
            # Same metadata fallback/normalization as the sync path.
            metadata = d.pop("metadata", d)
            if metadata.get("source_id"):
                metadata["source"] = metadata.pop("source_id")
            docs.append(Document(page_content=content, metadata=metadata))
        return docs
    def _create_request(self, query: str) -> tuple[str, dict, dict]:
        """Assemble ``(url, json body, headers)`` for the /query endpoint."""
        url = f"{self.url}/query"
        json = {
            "queries": [
                {
                    "query": query,
                    "filter": self.filter,
                    "top_k": self.top_k,
                }
            ]
        }
        headers = {
            "Content-Type": "application/json",
            "Authorization": f"Bearer {self.bearer_token}",
        }
        return url, json, headers
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/retrievers/tfidf.py | from __future__ import annotations
import pickle
from pathlib import Path
from typing import Any, Dict, Iterable, List, Optional
from langchain_core.callbacks import CallbackManagerForRetrieverRun
from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever
from pydantic import ConfigDict
class TFIDFRetriever(BaseRetriever):
    """`TF-IDF` retriever.

    Largely based on
    https://github.com/asvskartheek/Text-Retrieval/blob/master/TF-IDF%20Search%20Engine%20(SKLEARN).ipynb
    """

    vectorizer: Any = None
    """TF-IDF vectorizer."""
    docs: List[Document]
    """Documents."""
    tfidf_array: Any = None
    """TF-IDF array."""
    k: int = 4
    """Number of documents to return."""
    model_config = ConfigDict(
        arbitrary_types_allowed=True,
    )

    @classmethod
    def from_texts(
        cls,
        texts: Iterable[str],
        metadatas: Optional[Iterable[dict]] = None,
        tfidf_params: Optional[Dict[str, Any]] = None,
        **kwargs: Any,
    ) -> TFIDFRetriever:
        """Create a retriever by fitting a TF-IDF vectorizer on ``texts``.

        Args:
            texts: Texts to index. May be any iterable, including a
                one-shot generator.
            metadatas: Optional metadata dicts aligned with ``texts``.
            tfidf_params: Keyword arguments forwarded to ``TfidfVectorizer``.
            **kwargs: Extra fields forwarded to the retriever constructor.

        Returns:
            A fitted ``TFIDFRetriever``.
        """
        try:
            from sklearn.feature_extraction.text import TfidfVectorizer
        except ImportError:
            raise ImportError(
                "Could not import scikit-learn, please install with `pip install "
                "scikit-learn`."
            )
        # Materialize ``texts`` so generator inputs survive being iterated
        # twice (once by fit_transform, once when zipping into Documents);
        # previously a generator yielded an empty ``docs`` list.
        texts = list(texts)
        tfidf_params = tfidf_params or {}
        vectorizer = TfidfVectorizer(**tfidf_params)
        tfidf_array = vectorizer.fit_transform(texts)
        metadatas = metadatas or ({} for _ in texts)
        docs = [Document(page_content=t, metadata=m) for t, m in zip(texts, metadatas)]
        return cls(vectorizer=vectorizer, docs=docs, tfidf_array=tfidf_array, **kwargs)

    @classmethod
    def from_documents(
        cls,
        documents: Iterable[Document],
        *,
        tfidf_params: Optional[Dict[str, Any]] = None,
        **kwargs: Any,
    ) -> TFIDFRetriever:
        """Create a retriever from Documents (content/metadata pairs)."""
        texts, metadatas = zip(*((d.page_content, d.metadata) for d in documents))
        return cls.from_texts(
            texts=texts, tfidf_params=tfidf_params, metadatas=metadatas, **kwargs
        )

    def _get_relevant_documents(
        self, query: str, *, run_manager: CallbackManagerForRetrieverRun
    ) -> List[Document]:
        """Return the ``k`` documents most cosine-similar to ``query``."""
        from sklearn.metrics.pairwise import cosine_similarity

        query_vec = self.vectorizer.transform(
            [query]
        )  # Ip -- (n_docs,x), Op -- (n_docs,n_Feats)
        results = cosine_similarity(self.tfidf_array, query_vec).reshape(
            (-1,)
        )  # Op -- (n_docs,1) -- Cosine Sim with each doc
        # argsort is ascending, so take the last k indices and reverse them.
        return_docs = [self.docs[i] for i in results.argsort()[-self.k :][::-1]]
        return return_docs

    def save_local(
        self,
        folder_path: str,
        file_name: str = "tfidf_vectorizer",
    ) -> None:
        """Persist the vectorizer (joblib) and docs/array (pickle) to disk."""
        try:
            import joblib
        except ImportError:
            raise ImportError(
                "Could not import joblib, please install with `pip install joblib`."
            )
        path = Path(folder_path)
        path.mkdir(exist_ok=True, parents=True)
        # Save vectorizer with joblib dump.
        joblib.dump(self.vectorizer, path / f"{file_name}.joblib")
        # Save docs and tfidf array as pickle.
        with open(path / f"{file_name}.pkl", "wb") as f:
            pickle.dump((self.docs, self.tfidf_array), f)

    @classmethod
    def load_local(
        cls,
        folder_path: str,
        *,
        allow_dangerous_deserialization: bool = False,
        file_name: str = "tfidf_vectorizer",
    ) -> TFIDFRetriever:
        """Load the retriever from local storage.

        Args:
            folder_path: Folder path to load from.
            allow_dangerous_deserialization: Whether to allow dangerous deserialization.
                Defaults to False.
                The deserialization relies on .joblib and .pkl files, which can be
                modified to deliver a malicious payload that results in execution of
                arbitrary code on your machine. You will need to set this to `True` to
                use deserialization. If you do this, make sure you trust the source of
                the file.
            file_name: File name to load from. Defaults to "tfidf_vectorizer".

        Returns:
            TFIDFRetriever: Loaded retriever.
        """
        try:
            import joblib
        except ImportError:
            raise ImportError(
                "Could not import joblib, please install with `pip install joblib`."
            )
        if not allow_dangerous_deserialization:
            # Note: the fragments below are joined with explicit trailing
            # spaces; the original message ran sentences together.
            raise ValueError(
                "The de-serialization of this retriever is based on .joblib and "
                ".pkl files. "
                "Such files can be modified to deliver a malicious payload that "
                "results in execution of arbitrary code on your machine. "
                "You will need to set `allow_dangerous_deserialization` to `True` to "
                "load this retriever. If you do this, make sure you trust the source "
                "of the file, and you are responsible for validating the file "
                "came from a trusted source."
            )
        path = Path(folder_path)
        # Load vectorizer with joblib load.
        vectorizer = joblib.load(path / f"{file_name}.joblib")
        # Load docs and tfidf array as pickle.
        with open(path / f"{file_name}.pkl", "rb") as f:
            # This code path can only be triggered if the user
            # passed allow_dangerous_deserialization=True
            docs, tfidf_array = pickle.load(f)  # ignore[pickle]: explicit-opt-in
        return cls(vectorizer=vectorizer, docs=docs, tfidf_array=tfidf_array)
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/retrievers/arcee.py | from typing import Any, Dict, List, Optional
from langchain_core.callbacks import CallbackManagerForRetrieverRun
from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever
from langchain_core.utils import convert_to_secret_str, get_from_dict_or_env, pre_init
from pydantic import ConfigDict, SecretStr
from langchain_community.utilities.arcee import ArceeWrapper, DALMFilter
class ArceeRetriever(BaseRetriever):
    """Arcee Domain Adapted Language Models (DALMs) retriever.
    To use, set the ``ARCEE_API_KEY`` environment variable with your Arcee API key,
    or pass ``arcee_api_key`` as a named parameter.
    Example:
        .. code-block:: python
            from langchain_community.retrievers import ArceeRetriever
            retriever = ArceeRetriever(
                model="DALM-PubMed",
                arcee_api_key="ARCEE-API-KEY"
            )
            documents = retriever.invoke("AI-driven music therapy")
    """
    _client: Optional[ArceeWrapper] = None  #: :meta private:
    """Arcee client."""
    arcee_api_key: SecretStr
    """Arcee API Key"""
    model: str
    """Arcee DALM name"""
    arcee_api_url: str = "https://api.arcee.ai"
    """Arcee API URL"""
    arcee_api_version: str = "v2"
    """Arcee API Version"""
    arcee_app_url: str = "https://app.arcee.ai"
    """Arcee App URL"""
    model_kwargs: Optional[Dict[str, Any]] = None
    """Keyword arguments to pass to the model."""
    model_config = ConfigDict(
        extra="forbid",
    )
    def __init__(self, **data: Any) -> None:
        """Initializes private fields."""
        super().__init__(**data)
        # Built after pydantic init so validated credentials/urls are used;
        # also verifies the DALM has finished training before first use.
        self._client = ArceeWrapper(
            arcee_api_key=self.arcee_api_key.get_secret_value(),
            arcee_api_url=self.arcee_api_url,
            arcee_api_version=self.arcee_api_version,
            model_kwargs=self.model_kwargs,
            model_name=self.model,
        )
        self._client.validate_model_training_status()
    @pre_init
    def validate_environments(cls, values: Dict) -> Dict:
        """Validate Arcee environment variables."""
        # validate env vars (each field falls back to its env variable)
        values["arcee_api_key"] = convert_to_secret_str(
            get_from_dict_or_env(
                values,
                "arcee_api_key",
                "ARCEE_API_KEY",
            )
        )
        values["arcee_api_url"] = get_from_dict_or_env(
            values,
            "arcee_api_url",
            "ARCEE_API_URL",
        )
        values["arcee_app_url"] = get_from_dict_or_env(
            values,
            "arcee_app_url",
            "ARCEE_APP_URL",
        )
        values["arcee_api_version"] = get_from_dict_or_env(
            values,
            "arcee_api_version",
            "ARCEE_API_VERSION",
        )
        # validate model kwargs
        if values["model_kwargs"]:
            kw = values["model_kwargs"]
            # validate size
            if kw.get("size") is not None:
                if not kw.get("size") >= 0:
                    raise ValueError("`size` must not be negative.")
            # validate filters (each entry must parse as a DALMFilter)
            if kw.get("filters") is not None:
                if not isinstance(kw.get("filters"), List):
                    raise ValueError("`filters` must be a list.")
                for f in kw.get("filters"):
                    DALMFilter(**f)
        return values
    def _get_relevant_documents(
        self, query: str, run_manager: CallbackManagerForRetrieverRun, **kwargs: Any
    ) -> List[Document]:
        """Retrieve {size} contexts with your retriever for a given query
        Args:
            query: Query to submit to the model
            size: The max number of context results to retrieve.
            Defaults to 3. (Can be less if filters are provided).
            filters: Filters to apply to the context dataset.
        """
        try:
            if not self._client:
                raise ValueError("Client is not initialized.")
            # Retrieval errors are re-raised uniformly as ValueError.
            return self._client.retrieve(query=query, **kwargs)
        except Exception as e:
            raise ValueError(f"Error while retrieving documents: {e}") from e
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/retrievers/databerry.py | from typing import List, Optional
import aiohttp
import requests
from langchain_core.callbacks import (
AsyncCallbackManagerForRetrieverRun,
CallbackManagerForRetrieverRun,
)
from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever
class DataberryRetriever(BaseRetriever):
    """`Databerry API` retriever.

    Posts the query to a Databerry datastore endpoint and converts each
    returned result into a ``Document`` carrying source/score metadata.
    """

    datastore_url: str
    top_k: Optional[int]
    api_key: Optional[str]

    def _get_relevant_documents(
        self, query: str, *, run_manager: CallbackManagerForRetrieverRun
    ) -> List[Document]:
        """Synchronously query the datastore and parse its results."""
        payload: dict = {"query": query}
        if self.top_k is not None:
            payload["topK"] = self.top_k
        headers = {"Content-Type": "application/json"}
        if self.api_key is not None:
            headers["Authorization"] = f"Bearer {self.api_key}"
        response = requests.post(self.datastore_url, json=payload, headers=headers)
        data = response.json()
        documents = []
        for entry in data["results"]:
            documents.append(
                Document(
                    page_content=entry["text"],
                    metadata={"source": entry["source"], "score": entry["score"]},
                )
            )
        return documents

    async def _aget_relevant_documents(
        self, query: str, *, run_manager: AsyncCallbackManagerForRetrieverRun
    ) -> List[Document]:
        """Asynchronously query the datastore and parse its results."""
        payload: dict = {"query": query}
        if self.top_k is not None:
            payload["topK"] = self.top_k
        headers = {"Content-Type": "application/json"}
        if self.api_key is not None:
            headers["Authorization"] = f"Bearer {self.api_key}"
        async with aiohttp.ClientSession() as session:
            async with session.post(
                self.datastore_url, json=payload, headers=headers
            ) as response:
                data = await response.json()
        return [
            Document(
                page_content=entry["text"],
                metadata={"source": entry["source"], "score": entry["score"]},
            )
            for entry in data["results"]
        ]
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/retrievers/weaviate_hybrid_search.py | from __future__ import annotations
from typing import Any, Dict, List, Optional, cast
from uuid import uuid4
from langchain_core.callbacks import CallbackManagerForRetrieverRun
from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever
from pydantic import ConfigDict, model_validator
class WeaviateHybridSearchRetriever(BaseRetriever):
    """`Weaviate hybrid search` retriever.
    See the documentation:
    https://weaviate.io/blog/hybrid-search-explained
    """
    client: Any = None
    """keyword arguments to pass to the Weaviate client."""
    index_name: str
    """The name of the index to use."""
    text_key: str
    """The name of the text key to use."""
    alpha: float = 0.5
    """The weight of the text key in the hybrid search."""
    k: int = 4
    """The number of results to return."""
    attributes: List[str]
    """The attributes to return in the results."""
    create_schema_if_missing: bool = True
    """Whether to create the schema if it doesn't exist."""
    @model_validator(mode="before")
    @classmethod
    def validate_client(
        cls,
        values: Dict[str, Any],
    ) -> Any:
        """Validate the client and, if configured, ensure the class schema exists."""
        try:
            import weaviate
        except ImportError:
            raise ImportError(
                "Could not import weaviate python package. "
                "Please install it with `pip install weaviate-client`."
            )
        if not isinstance(values["client"], weaviate.Client):
            client = values["client"]
            raise ValueError(
                f"client should be an instance of weaviate.Client, got {type(client)}"
            )
        if values.get("attributes") is None:
            values["attributes"] = []
        # text_key must always be returned so page_content can be extracted.
        cast(List, values["attributes"]).append(values["text_key"])
        if values.get("create_schema_if_missing", True):
            class_obj = {
                "class": values["index_name"],
                "properties": [{"name": values["text_key"], "dataType": ["text"]}],
                "vectorizer": "text2vec-openai",
            }
            if not values["client"].schema.exists(values["index_name"]):
                values["client"].schema.create_class(class_obj)
        return values
    model_config = ConfigDict(
        arbitrary_types_allowed=True,
    )
    # added text_key
    def add_documents(self, docs: List[Document], **kwargs: Any) -> List[str]:
        """Upload documents to Weaviate. Returns the assigned object UUIDs."""
        from weaviate.util import get_valid_uuid
        with self.client.batch as batch:
            ids = []
            for i, doc in enumerate(docs):
                metadata = doc.metadata or {}
                data_properties = {self.text_key: doc.page_content, **metadata}
                # If the UUID of one of the objects already exists
                # then the existing object will be replaced by the new object.
                if "uuids" in kwargs:
                    _id = kwargs["uuids"][i]
                else:
                    _id = get_valid_uuid(uuid4())
                batch.add_data_object(data_properties, self.index_name, _id)
                ids.append(_id)
        return ids
    def _get_relevant_documents(
        self,
        query: str,
        *,
        run_manager: CallbackManagerForRetrieverRun,
        where_filter: Optional[Dict[str, object]] = None,
        score: bool = False,
        hybrid_search_kwargs: Optional[Dict[str, object]] = None,
    ) -> List[Document]:
        """Look up similar documents in Weaviate.
        query: The query to search for relevant documents
        of using weviate hybrid search.
        where_filter: A filter to apply to the query.
        https://weaviate.io/developers/weaviate/guides/querying/#filtering
        score: Whether to include the score, and score explanation
        in the returned Documents meta_data.
        hybrid_search_kwargs: Used to pass additional arguments
        to the .with_hybrid() method.
        The primary uses cases for this are:
        1) Search specific properties only -
        specify which properties to be used during hybrid search portion.
        Note: this is not the same as the (self.attributes) to be returned.
        Example - hybrid_search_kwargs={"properties": ["question", "answer"]}
        https://weaviate.io/developers/weaviate/search/hybrid#selected-properties-only
        2) Weight boosted searched properties -
        Boost the weight of certain properties during the hybrid search portion.
        Example - hybrid_search_kwargs={"properties": ["question^2", "answer"]}
        https://weaviate.io/developers/weaviate/search/hybrid#weight-boost-searched-properties
        3) Search with a custom vector - Define a different vector
        to be used during the hybrid search portion.
        Example - hybrid_search_kwargs={"vector": [0.1, 0.2, 0.3, ...]}
        https://weaviate.io/developers/weaviate/search/hybrid#with-a-custom-vector
        4) Use Fusion ranking method
        Example - from weaviate.gql.get import HybridFusion
        hybrid_search_kwargs={"fusion": fusion_type=HybridFusion.RELATIVE_SCORE}
        https://weaviate.io/developers/weaviate/search/hybrid#fusion-ranking-method
        """
        query_obj = self.client.query.get(self.index_name, self.attributes)
        if where_filter:
            query_obj = query_obj.with_where(where_filter)
        if score:
            query_obj = query_obj.with_additional(["score", "explainScore"])
        if hybrid_search_kwargs is None:
            hybrid_search_kwargs = {}
        result = (
            query_obj.with_hybrid(query, alpha=self.alpha, **hybrid_search_kwargs)
            .with_limit(self.k)
            .do()
        )
        if "errors" in result:
            raise ValueError(f"Error during query: {result['errors']}")
        docs = []
        # The text_key property becomes page_content; every other returned
        # attribute is kept as metadata.
        for res in result["data"]["Get"][self.index_name]:
            text = res.pop(self.text_key)
            docs.append(Document(page_content=text, metadata=res))
        return docs
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/retrievers/pinecone_hybrid_search.py | """Taken from: https://docs.pinecone.io/docs/hybrid-search"""
import hashlib
from typing import Any, Dict, List, Optional
from langchain_core.callbacks import CallbackManagerForRetrieverRun
from langchain_core.documents import Document
from langchain_core.embeddings import Embeddings
from langchain_core.retrievers import BaseRetriever
from langchain_core.utils import pre_init
from pydantic import ConfigDict
def hash_text(text: str) -> str:
    """Hash a text using SHA256.

    Args:
        text: Text to hash.

    Returns:
        Hashed text.
    """
    digest = hashlib.sha256(text.encode("utf-8"))
    return str(digest.hexdigest())
def create_index(
    contexts: List[str],
    index: Any,
    embeddings: Embeddings,
    sparse_encoder: Any,
    ids: Optional[List[str]] = None,
    metadatas: Optional[List[dict]] = None,
    namespace: Optional[str] = None,
) -> None:
    """Create an index from a list of contexts.
    It modifies the index argument in-place!
    Args:
        contexts: List of contexts to embed.
        index: Index to use.
        embeddings: Embeddings model to use.
        sparse_encoder: Sparse encoder to use.
        ids: List of ids to use for the documents.
        metadatas: List of metadata to use for the documents.
        namespace: Namespace value for index partition.
    """
    # NOTE(review): assumes ids and metadatas, when provided, align 1:1 with
    # contexts — TODO confirm with callers.
    batch_size = 32
    _iterator = range(0, len(contexts), batch_size)
    # tqdm is optional; fall back to a plain range when not installed.
    try:
        from tqdm.auto import tqdm
        _iterator = tqdm(_iterator)
    except ImportError:
        pass
    if ids is None:
        # create unique ids using hash of the text
        ids = [hash_text(context) for context in contexts]
    for i in _iterator:
        # find end of batch
        i_end = min(i + batch_size, len(contexts))
        # extract batch
        context_batch = contexts[i:i_end]
        batch_ids = ids[i:i_end]
        metadata_batch = (
            metadatas[i:i_end] if metadatas else [{} for _ in context_batch]
        )
        # add context passages as metadata
        meta = [
            {"context": context, **metadata}
            for context, metadata in zip(context_batch, metadata_batch)
        ]
        # create dense vectors
        dense_embeds = embeddings.embed_documents(context_batch)
        # create sparse vectors
        sparse_embeds = sparse_encoder.encode_documents(context_batch)
        # coerce sparse values to plain floats for serialization
        for s in sparse_embeds:
            s["values"] = [float(s1) for s1 in s["values"]]
        vectors = []
        # loop through the data and create dictionaries for upserts
        for doc_id, sparse, dense, metadata in zip(
            batch_ids, sparse_embeds, dense_embeds, meta
        ):
            vectors.append(
                {
                    "id": doc_id,
                    "sparse_values": sparse,
                    "values": dense,
                    "metadata": metadata,
                }
            )
        # upload the documents to the new hybrid index
        index.upsert(vectors, namespace=namespace)
class PineconeHybridSearchRetriever(BaseRetriever):
    """`Pinecone Hybrid Search` retriever.

    Blends dense (semantic) and sparse (keyword) retrieval against a
    Pinecone index, combining the two scores via ``hybrid_convex_scale``.
    """

    embeddings: Embeddings
    """Embeddings model to use."""
    sparse_encoder: Any = None
    """Sparse encoder to use."""
    index: Any = None
    """Pinecone index to use."""
    top_k: int = 4
    """Number of documents to return."""
    alpha: float = 0.5
    """Alpha value for hybrid search (0.0-1.0 blend of sparse vs. dense scores)."""
    namespace: Optional[str] = None
    """Namespace value for index partition."""

    model_config = ConfigDict(
        arbitrary_types_allowed=True,
        extra="forbid",
    )

    def add_texts(
        self,
        texts: List[str],
        ids: Optional[List[str]] = None,
        metadatas: Optional[List[dict]] = None,
        namespace: Optional[str] = None,
    ) -> None:
        """Embed and upsert texts into the underlying Pinecone index.

        Args:
            texts: Texts to index.
            ids: Optional ids, one per text; defaults to a hash of each text.
            metadatas: Optional metadata dicts, one per text.
            namespace: Optional namespace for the index partition.
        """
        create_index(
            texts,
            self.index,
            self.embeddings,
            self.sparse_encoder,
            ids=ids,
            metadatas=metadatas,
            namespace=namespace,
        )

    @pre_init
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that the pinecone_text python package is installed."""
        try:
            from pinecone_text.hybrid import hybrid_convex_scale  # noqa:F401
            from pinecone_text.sparse.base_sparse_encoder import (
                BaseSparseEncoder,  # noqa:F401
            )
        except ImportError:
            raise ImportError(
                "Could not import pinecone_text python package. "
                "Please install it with `pip install pinecone_text`."
            )
        return values

    def _get_relevant_documents(
        self, query: str, *, run_manager: CallbackManagerForRetrieverRun, **kwargs: Any
    ) -> List[Document]:
        """Return the ``top_k`` documents most relevant to ``query``.

        Extra keyword arguments are forwarded to ``index.query``.
        """
        from pinecone_text.hybrid import hybrid_convex_scale

        sparse_vec = self.sparse_encoder.encode_queries(query)
        # convert the question into a dense vector
        dense_vec = self.embeddings.embed_query(query)
        # scale the two representations with alpha before querying
        dense_vec, sparse_vec = hybrid_convex_scale(dense_vec, sparse_vec, self.alpha)
        sparse_vec["values"] = [float(s1) for s1 in sparse_vec["values"]]
        # query pinecone with the query parameters
        result = self.index.query(
            vector=dense_vec,
            sparse_vector=sparse_vec,
            top_k=self.top_k,
            include_metadata=True,
            namespace=self.namespace,
            **kwargs,
        )
        final_result = []
        for res in result["matches"]:
            # The original passage was stored under the "context" metadata key
            # by ``create_index``; everything else stays as document metadata.
            context = res["metadata"].pop("context")
            metadata = res["metadata"]
            if "score" not in metadata and "score" in res:
                metadata["score"] = res["score"]
            final_result.append(Document(page_content=context, metadata=metadata))
        return final_result
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/retrievers/pupmed.py | from langchain_community.retrievers.pubmed import PubMedRetriever
# This module name ("pupmed") appears to be a misspelling of "pubmed" that is
# presumably kept as a backwards-compatibility alias — it only re-exports
# ``PubMedRetriever`` from the correctly named module.
__all__ = [
    "PubMedRetriever",
]
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/retrievers/__init__.py | """**Retriever** class returns Documents given a text **query**.
It is more general than a vector store. A retriever does not need to be able to
store documents, only to return (or retrieve) them. Vector stores can be used as
the backbone of a retriever, but there are other types of retrievers as well.
**Class hierarchy:**
.. code-block::
BaseRetriever --> <name>Retriever # Examples: ArxivRetriever, MergerRetriever
**Main helpers:**
.. code-block::
Document, Serializable, Callbacks,
CallbackManagerForRetrieverRun, AsyncCallbackManagerForRetrieverRun
"""
import importlib
from typing import TYPE_CHECKING, Any
if TYPE_CHECKING:
from langchain_community.retrievers.arcee import (
ArceeRetriever,
)
from langchain_community.retrievers.arxiv import (
ArxivRetriever,
)
from langchain_community.retrievers.asknews import (
AskNewsRetriever,
)
from langchain_community.retrievers.azure_ai_search import (
AzureAISearchRetriever,
AzureCognitiveSearchRetriever,
)
from langchain_community.retrievers.bedrock import (
AmazonKnowledgeBasesRetriever,
)
from langchain_community.retrievers.bm25 import (
BM25Retriever,
)
from langchain_community.retrievers.breebs import (
BreebsRetriever,
)
from langchain_community.retrievers.chaindesk import (
ChaindeskRetriever,
)
from langchain_community.retrievers.chatgpt_plugin_retriever import (
ChatGPTPluginRetriever,
)
from langchain_community.retrievers.cohere_rag_retriever import (
CohereRagRetriever,
)
from langchain_community.retrievers.docarray import (
DocArrayRetriever,
)
from langchain_community.retrievers.dria_index import (
DriaRetriever,
)
from langchain_community.retrievers.elastic_search_bm25 import (
ElasticSearchBM25Retriever,
)
from langchain_community.retrievers.embedchain import (
EmbedchainRetriever,
)
from langchain_community.retrievers.google_cloud_documentai_warehouse import (
GoogleDocumentAIWarehouseRetriever,
)
from langchain_community.retrievers.google_vertex_ai_search import (
GoogleCloudEnterpriseSearchRetriever,
GoogleVertexAIMultiTurnSearchRetriever,
GoogleVertexAISearchRetriever,
)
from langchain_community.retrievers.kay import (
KayAiRetriever,
)
from langchain_community.retrievers.kendra import (
AmazonKendraRetriever,
)
from langchain_community.retrievers.knn import (
KNNRetriever,
)
from langchain_community.retrievers.llama_index import (
LlamaIndexGraphRetriever,
LlamaIndexRetriever,
)
from langchain_community.retrievers.metal import (
MetalRetriever,
)
from langchain_community.retrievers.milvus import (
MilvusRetriever,
)
from langchain_community.retrievers.nanopq import NanoPQRetriever
from langchain_community.retrievers.needle import NeedleRetriever
from langchain_community.retrievers.outline import (
OutlineRetriever,
)
from langchain_community.retrievers.pinecone_hybrid_search import (
PineconeHybridSearchRetriever,
)
from langchain_community.retrievers.pubmed import (
PubMedRetriever,
)
from langchain_community.retrievers.qdrant_sparse_vector_retriever import (
QdrantSparseVectorRetriever,
)
from langchain_community.retrievers.rememberizer import (
RememberizerRetriever,
)
from langchain_community.retrievers.remote_retriever import (
RemoteLangChainRetriever,
)
from langchain_community.retrievers.svm import (
SVMRetriever,
)
from langchain_community.retrievers.tavily_search_api import (
TavilySearchAPIRetriever,
)
from langchain_community.retrievers.tfidf import (
TFIDFRetriever,
)
from langchain_community.retrievers.thirdai_neuraldb import NeuralDBRetriever
from langchain_community.retrievers.vespa_retriever import (
VespaRetriever,
)
from langchain_community.retrievers.weaviate_hybrid_search import (
WeaviateHybridSearchRetriever,
)
from langchain_community.retrievers.web_research import WebResearchRetriever
from langchain_community.retrievers.wikipedia import (
WikipediaRetriever,
)
from langchain_community.retrievers.you import (
YouRetriever,
)
from langchain_community.retrievers.zep import (
ZepRetriever,
)
from langchain_community.retrievers.zep_cloud import (
ZepCloudRetriever,
)
from langchain_community.retrievers.zilliz import (
ZillizRetriever,
)
# Map of public retriever class name -> module path, used by ``__getattr__``
# below to import implementations lazily on first attribute access (keeps
# importing this package cheap and avoids pulling in optional dependencies).
_module_lookup = {
    "AmazonKendraRetriever": "langchain_community.retrievers.kendra",
    "AmazonKnowledgeBasesRetriever": "langchain_community.retrievers.bedrock",
    "ArceeRetriever": "langchain_community.retrievers.arcee",
    "ArxivRetriever": "langchain_community.retrievers.arxiv",
    "AskNewsRetriever": "langchain_community.retrievers.asknews",
    "AzureAISearchRetriever": "langchain_community.retrievers.azure_ai_search",
    "AzureCognitiveSearchRetriever": "langchain_community.retrievers.azure_ai_search",
    "BM25Retriever": "langchain_community.retrievers.bm25",
    "BreebsRetriever": "langchain_community.retrievers.breebs",
    "ChaindeskRetriever": "langchain_community.retrievers.chaindesk",
    "ChatGPTPluginRetriever": "langchain_community.retrievers.chatgpt_plugin_retriever",
    "CohereRagRetriever": "langchain_community.retrievers.cohere_rag_retriever",
    "DocArrayRetriever": "langchain_community.retrievers.docarray",
    "DriaRetriever": "langchain_community.retrievers.dria_index",
    "ElasticSearchBM25Retriever": "langchain_community.retrievers.elastic_search_bm25",
    "EmbedchainRetriever": "langchain_community.retrievers.embedchain",
    "GoogleCloudEnterpriseSearchRetriever": "langchain_community.retrievers.google_vertex_ai_search",  # noqa: E501
    "GoogleDocumentAIWarehouseRetriever": "langchain_community.retrievers.google_cloud_documentai_warehouse",  # noqa: E501
    "GoogleVertexAIMultiTurnSearchRetriever": "langchain_community.retrievers.google_vertex_ai_search",  # noqa: E501
    "GoogleVertexAISearchRetriever": "langchain_community.retrievers.google_vertex_ai_search",  # noqa: E501
    "KNNRetriever": "langchain_community.retrievers.knn",
    "KayAiRetriever": "langchain_community.retrievers.kay",
    "LlamaIndexGraphRetriever": "langchain_community.retrievers.llama_index",
    "LlamaIndexRetriever": "langchain_community.retrievers.llama_index",
    "MetalRetriever": "langchain_community.retrievers.metal",
    "MilvusRetriever": "langchain_community.retrievers.milvus",
    "NanoPQRetriever": "langchain_community.retrievers.nanopq",
    "NeedleRetriever": "langchain_community.retrievers.needle",
    "OutlineRetriever": "langchain_community.retrievers.outline",
    "PineconeHybridSearchRetriever": "langchain_community.retrievers.pinecone_hybrid_search",  # noqa: E501
    "PubMedRetriever": "langchain_community.retrievers.pubmed",
    "QdrantSparseVectorRetriever": "langchain_community.retrievers.qdrant_sparse_vector_retriever",  # noqa: E501
    "RememberizerRetriever": "langchain_community.retrievers.rememberizer",
    "RemoteLangChainRetriever": "langchain_community.retrievers.remote_retriever",
    "SVMRetriever": "langchain_community.retrievers.svm",
    "TFIDFRetriever": "langchain_community.retrievers.tfidf",
    "TavilySearchAPIRetriever": "langchain_community.retrievers.tavily_search_api",
    "VespaRetriever": "langchain_community.retrievers.vespa_retriever",
    "WeaviateHybridSearchRetriever": "langchain_community.retrievers.weaviate_hybrid_search",  # noqa: E501
    "WebResearchRetriever": "langchain_community.retrievers.web_research",
    "WikipediaRetriever": "langchain_community.retrievers.wikipedia",
    "YouRetriever": "langchain_community.retrievers.you",
    "ZepRetriever": "langchain_community.retrievers.zep",
    "ZepCloudRetriever": "langchain_community.retrievers.zep_cloud",
    "ZillizRetriever": "langchain_community.retrievers.zilliz",
    "NeuralDBRetriever": "langchain_community.retrievers.thirdai_neuraldb",
}
def __getattr__(name: str) -> Any:
    """Lazily import a retriever class the first time it is accessed."""
    module_path = _module_lookup.get(name)
    if module_path is None:
        raise AttributeError(f"module {__name__} has no attribute {name}")
    module = importlib.import_module(module_path)
    return getattr(module, name)
# Public API of this package; the names here mirror the keys of
# ``_module_lookup`` so static tooling sees the same surface that
# ``__getattr__`` serves dynamically.
__all__ = [
    "AmazonKendraRetriever",
    "AmazonKnowledgeBasesRetriever",
    "ArceeRetriever",
    "ArxivRetriever",
    "AskNewsRetriever",
    "AzureAISearchRetriever",
    "AzureCognitiveSearchRetriever",
    "BM25Retriever",
    "BreebsRetriever",
    "ChaindeskRetriever",
    "ChatGPTPluginRetriever",
    "CohereRagRetriever",
    "DocArrayRetriever",
    "DriaRetriever",
    "ElasticSearchBM25Retriever",
    "EmbedchainRetriever",
    "GoogleCloudEnterpriseSearchRetriever",
    "GoogleDocumentAIWarehouseRetriever",
    "GoogleVertexAIMultiTurnSearchRetriever",
    "GoogleVertexAISearchRetriever",
    "KayAiRetriever",
    "KNNRetriever",
    "LlamaIndexGraphRetriever",
    "LlamaIndexRetriever",
    "MetalRetriever",
    "MilvusRetriever",
    "NanoPQRetriever",
    "NeedleRetriever",
    "NeuralDBRetriever",
    "OutlineRetriever",
    "PineconeHybridSearchRetriever",
    "PubMedRetriever",
    "QdrantSparseVectorRetriever",
    "RememberizerRetriever",
    "RemoteLangChainRetriever",
    "SVMRetriever",
    "TavilySearchAPIRetriever",
    "TFIDFRetriever",
    "VespaRetriever",
    "WeaviateHybridSearchRetriever",
    "WebResearchRetriever",
    "WikipediaRetriever",
    "YouRetriever",
    "ZepRetriever",
    "ZepCloudRetriever",
    "ZillizRetriever",
]
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/retrievers/bm25.py | from __future__ import annotations
from typing import Any, Callable, Dict, Iterable, List, Optional
from langchain_core.callbacks import CallbackManagerForRetrieverRun
from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever
from pydantic import ConfigDict, Field
def default_preprocessing_func(text: str) -> List[str]:
    """Tokenize *text* by splitting on runs of whitespace."""
    tokens = text.split()
    return tokens
class BM25Retriever(BaseRetriever):
    """`BM25` retriever without Elasticsearch.

    Ranks the stored documents against a query with the Okapi BM25 algorithm
    (via the ``rank_bm25`` package) and returns the top ``k`` matches.
    """

    vectorizer: Any = None
    """ BM25 vectorizer."""
    docs: List[Document] = Field(repr=False)
    """ List of documents."""
    k: int = 4
    """ Number of documents to return."""
    preprocess_func: Callable[[str], List[str]] = default_preprocessing_func
    """ Preprocessing function to use on the text before BM25 vectorization."""

    model_config = ConfigDict(
        arbitrary_types_allowed=True,
    )

    @classmethod
    def from_texts(
        cls,
        texts: Iterable[str],
        metadatas: Optional[Iterable[dict]] = None,
        ids: Optional[Iterable[str]] = None,
        bm25_params: Optional[Dict[str, Any]] = None,
        preprocess_func: Callable[[str], List[str]] = default_preprocessing_func,
        **kwargs: Any,
    ) -> BM25Retriever:
        """
        Create a BM25Retriever from a list of texts.

        Args:
            texts: An iterable of texts to vectorize.
            metadatas: A list of metadata dicts to associate with each text.
            ids: A list of ids to associate with each text.
            bm25_params: Parameters to pass to the BM25 vectorizer.
            preprocess_func: A function to preprocess each text before vectorization.
            **kwargs: Any other arguments to pass to the retriever.

        Returns:
            A BM25Retriever instance.
        """
        try:
            from rank_bm25 import BM25Okapi
        except ImportError:
            raise ImportError(
                "Could not import rank_bm25, please install with `pip install "
                "rank_bm25`."
            )
        # Materialize ``texts`` so a generator/iterator input is not exhausted
        # by the preprocessing pass: the original code iterated ``texts`` twice
        # and silently produced zero documents for one-shot iterables.
        texts = list(texts)
        texts_processed = [preprocess_func(t) for t in texts]
        bm25_params = bm25_params or {}
        vectorizer = BM25Okapi(texts_processed, **bm25_params)
        metadatas = metadatas or ({} for _ in texts)
        if ids:
            docs = [
                Document(page_content=t, metadata=m, id=i)
                for t, m, i in zip(texts, metadatas, ids)
            ]
        else:
            docs = [
                Document(page_content=t, metadata=m) for t, m in zip(texts, metadatas)
            ]
        return cls(
            vectorizer=vectorizer, docs=docs, preprocess_func=preprocess_func, **kwargs
        )

    @classmethod
    def from_documents(
        cls,
        documents: Iterable[Document],
        *,
        bm25_params: Optional[Dict[str, Any]] = None,
        preprocess_func: Callable[[str], List[str]] = default_preprocessing_func,
        **kwargs: Any,
    ) -> BM25Retriever:
        """
        Create a BM25Retriever from a list of Documents.

        Args:
            documents: A non-empty iterable of Documents to vectorize.
            bm25_params: Parameters to pass to the BM25 vectorizer.
            preprocess_func: A function to preprocess each text before vectorization.
            **kwargs: Any other arguments to pass to the retriever.

        Returns:
            A BM25Retriever instance.
        """
        texts, metadatas, ids = zip(
            *((d.page_content, d.metadata, d.id) for d in documents)
        )
        return cls.from_texts(
            texts=texts,
            bm25_params=bm25_params,
            metadatas=metadatas,
            ids=ids,
            preprocess_func=preprocess_func,
            **kwargs,
        )

    def _get_relevant_documents(
        self, query: str, *, run_manager: CallbackManagerForRetrieverRun
    ) -> List[Document]:
        """Return the ``k`` stored documents best matching ``query`` per BM25."""
        processed_query = self.preprocess_func(query)
        return_docs = self.vectorizer.get_top_n(processed_query, self.docs, n=self.k)
        return return_docs
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/retrievers/breebs.py | from typing import List
import requests
from langchain_core.callbacks.manager import CallbackManagerForRetrieverRun
from langchain_core.documents.base import Document
from langchain_core.retrievers import BaseRetriever
class BreebsRetriever(BaseRetriever):
    """A retriever class for `Breebs`.

    See https://www.breebs.com/ for more info.

    Args:
        breeb_key: The key to trigger the breeb
            (specialized knowledge pill on a specific topic).

    To retrieve the list of all available Breebs: you can call https://breebs.promptbreeders.com/web/listbreebs
    """

    # Key identifying which breeb (knowledge base) to query.
    breeb_key: str
    # Endpoint the retriever POSTs queries to.
    url: str = "https://breebs.promptbreeders.com/knowledge"

    def __init__(self, breeb_key: str):
        """Initialize the retriever with the given breeb key."""
        super().__init__(breeb_key=breeb_key)  # type: ignore[call-arg]
        self.breeb_key = breeb_key

    def _get_relevant_documents(
        self, query: str, *, run_manager: CallbackManagerForRetrieverRun
    ) -> List[Document]:
        """Retrieve context for the given query.

        Note that for the time being there is no score."""
        r = requests.post(
            self.url,
            json={
                "breeb_key": self.breeb_key,
                "query": query,
            },
        )
        # Any non-200 response is treated as "no results" rather than raised.
        if r.status_code != 200:
            return []
        else:
            chunks = r.json()
            # The API does not return scores yet, so a constant 1 is recorded.
            return [
                Document(
                    page_content=chunk["content"],
                    metadata={"source": chunk["source_url"], "score": 1},
                )
                for chunk in chunks
            ]
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/retrievers/elastic_search_bm25.py | """Wrapper around Elasticsearch vector database."""
from __future__ import annotations
import uuid
from typing import Any, Iterable, List
from langchain_core.callbacks import CallbackManagerForRetrieverRun
from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever
class ElasticSearchBM25Retriever(BaseRetriever):
    """`Elasticsearch` retriever that uses `BM25`.

    To connect to an Elasticsearch instance that requires login credentials,
    including Elastic Cloud, use the Elasticsearch URL format
    https://username:password@es_host:9243. For example, to connect to Elastic
    Cloud, create the Elasticsearch URL with the required authentication details and
    pass it to the ElasticVectorSearch constructor as the named parameter
    elasticsearch_url.

    You can obtain your Elastic Cloud URL and login credentials by logging in to the
    Elastic Cloud console at https://cloud.elastic.co, selecting your deployment, and
    navigating to the "Deployments" page.

    To obtain your Elastic Cloud password for the default "elastic" user:

    1. Log in to the Elastic Cloud console at https://cloud.elastic.co
    2. Go to "Security" > "Users"
    3. Locate the "elastic" user and click "Edit"
    4. Click "Reset password"
    5. Follow the prompts to reset the password

    The format for Elastic Cloud URLs is
    https://username:password@cluster_id.region_id.gcp.cloud.es.io:9243.
    """

    client: Any
    """Elasticsearch client."""
    index_name: str
    """Name of the index to use in Elasticsearch."""

    @classmethod
    def create(
        cls, elasticsearch_url: str, index_name: str, k1: float = 2.0, b: float = 0.75
    ) -> ElasticSearchBM25Retriever:
        """
        Create a ElasticSearchBM25Retriever from a list of texts.

        Args:
            elasticsearch_url: URL of the Elasticsearch instance to connect to.
            index_name: Name of the index to use in Elasticsearch.
            k1: BM25 parameter k1.
            b: BM25 parameter b.

        Returns:
            An ElasticSearchBM25Retriever backed by a freshly created index.
        """
        from elasticsearch import Elasticsearch

        # Create an Elasticsearch client instance
        es = Elasticsearch(elasticsearch_url)

        # Define the index settings and mappings: a custom BM25 similarity with
        # the caller-provided k1/b parameters, applied to the "content" field.
        settings = {
            "analysis": {"analyzer": {"default": {"type": "standard"}}},
            "similarity": {
                "custom_bm25": {
                    "type": "BM25",
                    "k1": k1,
                    "b": b,
                }
            },
        }
        mappings = {
            "properties": {
                "content": {
                    "type": "text",
                    "similarity": "custom_bm25",  # Use the custom BM25 similarity
                }
            }
        }

        # Create the index with the specified settings and mappings.
        # NOTE(review): this raises if the index already exists — verify callers
        # always use a fresh index name.
        es.indices.create(index=index_name, mappings=mappings, settings=settings)
        return cls(client=es, index_name=index_name)

    def add_texts(
        self,
        texts: Iterable[str],
        refresh_indices: bool = True,
    ) -> List[str]:
        """Run more texts through the embeddings and add to the retriever.

        Args:
            texts: Iterable of strings to add to the retriever.
            refresh_indices: bool to refresh ElasticSearch indices

        Returns:
            List of ids from adding the texts into the retriever.
        """
        try:
            from elasticsearch.helpers import bulk
        except ImportError:
            raise ImportError(
                "Could not import elasticsearch python package. "
                "Please install it with `pip install elasticsearch`."
            )
        requests = []
        ids = []
        for i, text in enumerate(texts):
            # Each document gets a random UUID4 id.
            _id = str(uuid.uuid4())
            request = {
                "_op_type": "index",
                "_index": self.index_name,
                "content": text,
                "_id": _id,
            }
            ids.append(_id)
            requests.append(request)
        bulk(self.client, requests)

        if refresh_indices:
            # Refresh so the new documents are immediately searchable.
            self.client.indices.refresh(index=self.index_name)
        return ids

    def _get_relevant_documents(
        self, query: str, *, run_manager: CallbackManagerForRetrieverRun
    ) -> List[Document]:
        """Run a full-text match query against the "content" field and return
        the hits as Documents (scores are not attached)."""
        query_dict = {"query": {"match": {"content": query}}}
        res = self.client.search(index=self.index_name, body=query_dict)

        docs = []
        for r in res["hits"]["hits"]:
            docs.append(Document(page_content=r["_source"]["content"]))
        return docs
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/retrievers/milvus.py | """Milvus Retriever"""
import warnings
from typing import Any, Dict, List, Optional
from langchain_core.callbacks import CallbackManagerForRetrieverRun
from langchain_core.documents import Document
from langchain_core.embeddings import Embeddings
from langchain_core.retrievers import BaseRetriever
from pydantic import model_validator
from langchain_community.vectorstores.milvus import Milvus
# TODO: Update to MilvusClient + Hybrid Search when available
class MilvusRetriever(BaseRetriever):
    """Milvus API retriever.

    See detailed instructions here: https://python.langchain.com/docs/integrations/retrievers/milvus_hybrid_search/

    Setup:
        Install ``langchain-milvus`` and other dependencies:

        .. code-block:: bash

            pip install -U pymilvus[model] langchain-milvus

    Key init args:
        embedding_function: Embeddings model to use
        collection_name: Name of the Milvus collection to search

    Instantiate:
        .. code-block:: python

            retriever = MilvusRetriever(embedding_function=embeddings)

    Usage:
        .. code-block:: python

            query = "What are the story about ventures?"

            retriever.invoke(query)

        .. code-block:: none

            [Document(page_content="In 'The Lost Expedition' by Caspian Grey...", metadata={'doc_id': '449281835035545843'}),
            Document(page_content="In 'The Phantom Pilgrim' by Rowan Welles...", metadata={'doc_id': '449281835035545845'}),
            Document(page_content="In 'The Dreamwalker's Journey' by Lyra Snow..", metadata={'doc_id': '449281835035545846'})]

    Use within a chain:
        .. code-block:: python

            from langchain_core.output_parsers import StrOutputParser
            from langchain_core.prompts import ChatPromptTemplate
            from langchain_core.runnables import RunnablePassthrough
            from langchain_openai import ChatOpenAI

            prompt = ChatPromptTemplate.from_template(
                \"\"\"Answer the question based only on the context provided.

            Context: {context}

            Question: {question}\"\"\"
            )

            llm = ChatOpenAI(model="gpt-3.5-turbo-0125")

            def format_docs(docs):
                return "\\n\\n".join(doc.page_content for doc in docs)

            chain = (
                {"context": retriever | format_docs, "question": RunnablePassthrough()}
                | prompt
                | llm
                | StrOutputParser()
            )

            chain.invoke("What novels has Lila written and what are their contents?")

        .. code-block:: none

            "Lila Rose has written 'The Memory Thief,' which follows a charismatic thief..."
    """  # noqa: E501

    embedding_function: Embeddings
    collection_name: str = "LangChainCollection"
    collection_properties: Optional[Dict[str, Any]] = None
    connection_args: Optional[Dict[str, Any]] = None
    consistency_level: str = "Session"
    search_params: Optional[dict] = None
    # The two fields below are populated by the ``create_retriever`` validator.
    store: Milvus
    retriever: BaseRetriever

    @model_validator(mode="before")
    @classmethod
    def create_retriever(cls, values: Dict) -> Any:
        """Create the Milvus store and retriever."""
        # NOTE(review): Milvus is constructed with positional arguments here,
        # so this depends on the Milvus __init__ parameter order matching the
        # field order above — verify when upgrading the vectorstore.
        values["store"] = Milvus(
            values["embedding_function"],
            values["collection_name"],
            values["collection_properties"],
            values["connection_args"],
            values["consistency_level"],
        )
        values["retriever"] = values["store"].as_retriever(
            search_kwargs={"param": values["search_params"]}
        )
        return values

    def add_texts(
        self, texts: List[str], metadatas: Optional[List[dict]] = None
    ) -> None:
        """Add text to the Milvus store

        Args:
            texts (List[str]): The text
            metadatas (List[dict]): Metadata dicts, must line up with existing store
        """
        self.store.add_texts(texts, metadatas)

    def _get_relevant_documents(
        self,
        query: str,
        *,
        run_manager: CallbackManagerForRetrieverRun,
        **kwargs: Any,
    ) -> List[Document]:
        """Delegate retrieval to the wrapped vectorstore retriever."""
        return self.retriever.invoke(
            query, run_manager=run_manager.get_child(), **kwargs
        )
def MilvusRetreiver(*args: Any, **kwargs: Any) -> MilvusRetriever:
    """Deprecated misspelled alias for :class:`MilvusRetriever`.

    Emits a ``DeprecationWarning`` and forwards all arguments unchanged.

    Args:
        *args: Positional arguments forwarded to ``MilvusRetriever``.
        **kwargs: Keyword arguments forwarded to ``MilvusRetriever``.

    Returns:
        MilvusRetriever
    """
    message = (
        "MilvusRetreiver will be deprecated in the future. "
        "Please use MilvusRetriever ('i' before 'e') instead."
    )
    warnings.warn(message, DeprecationWarning)
    return MilvusRetriever(*args, **kwargs)
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/retrievers/thirdai_neuraldb.py | from __future__ import annotations
import importlib
import os
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple, Union
from langchain_core.callbacks import CallbackManagerForRetrieverRun
from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever
from langchain_core.utils import convert_to_secret_str, get_from_dict_or_env, pre_init
from pydantic import ConfigDict, SecretStr
class NeuralDBRetriever(BaseRetriever):
"""Document retriever that uses ThirdAI's NeuralDB."""
thirdai_key: SecretStr
"""ThirdAI API Key"""
db: Any = None #: :meta private:
"""NeuralDB instance"""
model_config = ConfigDict(
extra="forbid",
)
@staticmethod
def _verify_thirdai_library(thirdai_key: Optional[str] = None) -> None:
try:
from thirdai import licensing
importlib.util.find_spec("thirdai.neural_db")
licensing.activate(thirdai_key or os.getenv("THIRDAI_KEY"))
except ImportError:
raise ImportError(
"Could not import thirdai python package and neuraldb dependencies. "
"Please install it with `pip install thirdai[neural_db]`."
)
@classmethod
def from_scratch(
cls,
thirdai_key: Optional[str] = None,
**model_kwargs: dict,
) -> NeuralDBRetriever:
"""
Create a NeuralDBRetriever from scratch.
To use, set the ``THIRDAI_KEY`` environment variable with your ThirdAI
API key, or pass ``thirdai_key`` as a named parameter.
Example:
.. code-block:: python
from langchain_community.retrievers import NeuralDBRetriever
retriever = NeuralDBRetriever.from_scratch(
thirdai_key="your-thirdai-key",
)
retriever.insert([
"/path/to/doc.pdf",
"/path/to/doc.docx",
"/path/to/doc.csv",
])
documents = retriever.invoke("AI-driven music therapy")
"""
NeuralDBRetriever._verify_thirdai_library(thirdai_key)
from thirdai import neural_db as ndb
return cls(thirdai_key=thirdai_key, db=ndb.NeuralDB(**model_kwargs)) # type: ignore[arg-type]
@classmethod
def from_checkpoint(
cls,
checkpoint: Union[str, Path],
thirdai_key: Optional[str] = None,
) -> NeuralDBRetriever:
"""
Create a NeuralDBRetriever with a base model from a saved checkpoint
To use, set the ``THIRDAI_KEY`` environment variable with your ThirdAI
API key, or pass ``thirdai_key`` as a named parameter.
Example:
.. code-block:: python
from langchain_community.retrievers import NeuralDBRetriever
retriever = NeuralDBRetriever.from_checkpoint(
checkpoint="/path/to/checkpoint.ndb",
thirdai_key="your-thirdai-key",
)
retriever.insert([
"/path/to/doc.pdf",
"/path/to/doc.docx",
"/path/to/doc.csv",
])
documents = retriever.invoke("AI-driven music therapy")
"""
NeuralDBRetriever._verify_thirdai_library(thirdai_key)
from thirdai import neural_db as ndb
return cls(thirdai_key=thirdai_key, db=ndb.NeuralDB.from_checkpoint(checkpoint)) # type: ignore[arg-type]
@pre_init
def validate_environments(cls, values: Dict) -> Dict:
"""Validate ThirdAI environment variables."""
values["thirdai_key"] = convert_to_secret_str(
get_from_dict_or_env(
values,
"thirdai_key",
"THIRDAI_KEY",
)
)
return values
def insert(
self,
sources: List[Any],
train: bool = True,
fast_mode: bool = True,
**kwargs: dict,
) -> None:
"""Inserts files / document sources into the retriever.
Args:
train: When True this means that the underlying model in the
NeuralDB will undergo unsupervised pretraining on the inserted files.
Defaults to True.
fast_mode: Much faster insertion with a slight drop in performance.
Defaults to True.
"""
sources = self._preprocess_sources(sources)
self.db.insert(
sources=sources,
train=train,
fast_approximation=fast_mode,
**kwargs,
)
def _preprocess_sources(self, sources: list) -> list:
"""Checks if the provided sources are string paths. If they are, convert
to NeuralDB document objects.
Args:
sources: list of either string paths to PDF, DOCX or CSV files, or
NeuralDB document objects.
"""
from thirdai import neural_db as ndb
if not sources:
return sources
preprocessed_sources = []
for doc in sources:
if not isinstance(doc, str):
preprocessed_sources.append(doc)
else:
if doc.lower().endswith(".pdf"):
preprocessed_sources.append(ndb.PDF(doc))
elif doc.lower().endswith(".docx"):
preprocessed_sources.append(ndb.DOCX(doc))
elif doc.lower().endswith(".csv"):
preprocessed_sources.append(ndb.CSV(doc))
else:
raise RuntimeError(
f"Could not automatically load {doc}. Only files "
"with .pdf, .docx, or .csv extensions can be loaded "
"automatically. For other formats, please use the "
"appropriate document object from the ThirdAI library."
)
return preprocessed_sources
def upvote(self, query: str, document_id: int) -> None:
"""The retriever upweights the score of a document for a specific query.
This is useful for fine-tuning the retriever to user behavior.
Args:
query: text to associate with `document_id`
document_id: id of the document to associate query with.
"""
self.db.text_to_result(query, document_id)
def upvote_batch(self, query_id_pairs: List[Tuple[str, int]]) -> None:
"""Given a batch of (query, document id) pairs, the retriever upweights
the scores of the document for the corresponding queries.
This is useful for fine-tuning the retriever to user behavior.
Args:
query_id_pairs: list of (query, document id) pairs. For each pair in
this list, the model will upweight the document id for the query.
"""
self.db.text_to_result_batch(query_id_pairs)
def associate(self, source: str, target: str) -> None:
"""The retriever associates a source phrase with a target phrase.
When the retriever sees the source phrase, it will also consider results
that are relevant to the target phrase.
Args:
source: text to associate to `target`.
target: text to associate `source` to.
"""
self.db.associate(source, target)
def associate_batch(self, text_pairs: List[Tuple[str, str]]) -> None:
"""Given a batch of (source, target) pairs, the retriever associates
each source phrase with the corresponding target phrase.
Args:
text_pairs: list of (source, target) text pairs. For each pair in
this list, the source will be associated with the target.
"""
self.db.associate_batch(text_pairs)
def _get_relevant_documents(
self, query: str, run_manager: CallbackManagerForRetrieverRun, **kwargs: Any
) -> List[Document]:
"""Retrieve {top_k} contexts with your retriever for a given query
Args:
query: Query to submit to the model
top_k: The max number of context results to retrieve. Defaults to 10.
"""
try:
if "top_k" not in kwargs:
kwargs["top_k"] = 10
references = self.db.search(query=query, **kwargs)
return [
Document(
page_content=ref.text,
metadata={
"id": ref.id,
"upvote_ids": ref.upvote_ids,
"source": ref.source,
"metadata": ref.metadata,
"score": ref.score,
"context": ref.context(1),
},
)
for ref in references
]
except Exception as e:
raise ValueError(f"Error while retrieving documents: {e}") from e
def save(self, path: str) -> None:
    """Persist the underlying NeuralDB instance to disk.

    The saved checkpoint can later be loaded back into memory with
    ``NeuralDB.from_checkpoint(path)``.

    Args:
        path: path on disk to save the NeuralDB instance to.
    """
    self.db.save(path)
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/retrievers/zep_cloud.py | from __future__ import annotations
from typing import TYPE_CHECKING, Any, Dict, List, Optional
from langchain_core.callbacks import (
AsyncCallbackManagerForRetrieverRun,
CallbackManagerForRetrieverRun,
)
from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever
from pydantic import model_validator
if TYPE_CHECKING:
from zep_cloud import MemorySearchResult, SearchScope, SearchType
from zep_cloud.client import AsyncZep, Zep
class ZepCloudRetriever(BaseRetriever):
    """`Zep Cloud` MemoryStore Retriever.

    Search your user's long-term chat history with Zep.

    Zep offers both simple semantic search and Maximal Marginal Relevance (MMR)
    reranking of search results.

    Note: You will need to provide the user's `session_id` to use this retriever.

    Args:
        api_key: Your Zep API key
        session_id: Identifies your user or a user's session (required)
        top_k: Number of documents to return (default: 3, optional)
        search_type: Type of search to perform (similarity / mmr)
            (default: similarity, optional)
        mmr_lambda: Lambda value for MMR search. Defaults to 0.5 (optional)

    Zep - Recall, understand, and extract data from chat histories.
    Power personalized AI experiences.
    =========
    Zep is a long-term memory service for AI Assistant apps.
    With Zep, you can provide AI assistants with the ability
    to recall past conversations,
    no matter how distant, while also reducing hallucinations, latency, and cost.

    see Zep Cloud Docs: https://help.getzep.com
    """

    api_key: str
    """Your Zep API key."""
    zep_client: Zep
    """Zep client used for making API requests."""
    zep_client_async: AsyncZep
    """Async Zep client used for making API requests."""
    session_id: str
    """Zep session ID."""
    top_k: Optional[int]
    """Number of items to return."""
    search_scope: SearchScope = "messages"
    """Which documents to search. Messages or Summaries?"""
    search_type: SearchType = "similarity"
    """Type of search to perform (similarity / mmr)"""
    mmr_lambda: Optional[float] = None
    """Lambda value for MMR search."""

    @model_validator(mode="before")
    @classmethod
    def create_client(cls, values: dict) -> Any:
        """Validate the API key and construct sync and async Zep clients.

        Raises:
            ImportError: If the ``zep-cloud`` package is not installed.
            ValueError: If no API key was provided.
        """
        try:
            from zep_cloud.client import AsyncZep, Zep
        except ImportError:
            raise ImportError(
                "Could not import zep-cloud package. "
                "Please install it with `pip install zep-cloud`."
            )
        if values.get("api_key") is None:
            raise ValueError("Zep API key is required.")
        values["zep_client"] = Zep(api_key=values.get("api_key"))
        values["zep_client_async"] = AsyncZep(api_key=values.get("api_key"))
        return values

    def _messages_search_result_to_doc(
        self, results: List[MemorySearchResult]
    ) -> List[Document]:
        """Convert message-scope search results into Documents.

        Results without a message payload are skipped; a ``None`` result
        list is treated as empty.
        """
        return [
            Document(
                page_content=str(r.message.content),
                metadata={
                    "score": r.score,
                    "uuid": r.message.uuid_,
                    "created_at": r.message.created_at,
                    "token_count": r.message.token_count,
                    "role": r.message.role or r.message.role_type,
                },
            )
            for r in results or []
            if r.message
        ]

    def _summary_search_result_to_doc(
        self, results: List[MemorySearchResult]
    ) -> List[Document]:
        """Convert summary-scope search results into Documents.

        Results without a summary payload are skipped; a ``None`` result
        list is treated as empty, mirroring
        ``_messages_search_result_to_doc`` (previously a ``None`` list
        raised a ``TypeError`` here).
        """
        return [
            Document(
                page_content=str(r.summary.content),
                metadata={
                    "score": r.score,
                    "uuid": r.summary.uuid_,
                    "created_at": r.summary.created_at,
                    "token_count": r.summary.token_count,
                },
            )
            for r in results or []
            if r.summary
        ]

    def _get_relevant_documents(
        self,
        query: str,
        *,
        run_manager: CallbackManagerForRetrieverRun,
        metadata: Optional[Dict[str, Any]] = None,
    ) -> List[Document]:
        """Search this session's Zep memory and return matching Documents."""
        if not self.zep_client:
            raise RuntimeError("Zep client not initialized.")
        results = self.zep_client.memory.search(
            self.session_id,
            text=query,
            metadata=metadata,
            search_scope=self.search_scope,
            search_type=self.search_type,
            mmr_lambda=self.mmr_lambda,
            limit=self.top_k,
        )
        if self.search_scope == "summary":
            return self._summary_search_result_to_doc(results)
        return self._messages_search_result_to_doc(results)

    async def _aget_relevant_documents(
        self,
        query: str,
        *,
        run_manager: AsyncCallbackManagerForRetrieverRun,
        metadata: Optional[Dict[str, Any]] = None,
    ) -> List[Document]:
        """Async variant of ``_get_relevant_documents``."""
        if not self.zep_client_async:
            raise RuntimeError("Zep client not initialized.")
        results = await self.zep_client_async.memory.search(
            self.session_id,
            text=query,
            metadata=metadata,
            search_scope=self.search_scope,
            search_type=self.search_type,
            mmr_lambda=self.mmr_lambda,
            limit=self.top_k,
        )
        if self.search_scope == "summary":
            return self._summary_search_result_to_doc(results)
        return self._messages_search_result_to_doc(results)
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/retrievers/you.py | from typing import Any, List
from langchain_core.callbacks import (
AsyncCallbackManagerForRetrieverRun,
CallbackManagerForRetrieverRun,
)
from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever
from langchain_community.utilities import YouSearchAPIWrapper
class YouRetriever(BaseRetriever, YouSearchAPIWrapper):
    """You.com Search API retriever.

    A thin retriever facade over ``YouSearchAPIWrapper``: queries are
    forwarded to ``results()`` / ``results_async()`` unchanged, and every
    wrapper argument keeps its original meaning.
    """

    def _get_relevant_documents(
        self,
        query: str,
        *,
        run_manager: CallbackManagerForRetrieverRun,
        **kwargs: Any,
    ) -> List[Document]:
        """Synchronously fetch documents for ``query`` via the You.com API."""
        child_manager = run_manager.get_child()
        return self.results(query, run_manager=child_manager, **kwargs)

    async def _aget_relevant_documents(
        self,
        query: str,
        *,
        run_manager: AsyncCallbackManagerForRetrieverRun,
        **kwargs: Any,
    ) -> List[Document]:
        """Asynchronously fetch documents for ``query`` via the You.com API."""
        child_manager = run_manager.get_child()
        return await self.results_async(
            query, run_manager=child_manager, **kwargs
        )
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/retrievers/needle.py | from typing import Any, List, Optional # noqa: I001
from langchain_core.callbacks import CallbackManagerForRetrieverRun
from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever
from pydantic import BaseModel, Field
class NeedleRetriever(BaseRetriever, BaseModel):
    """
    NeedleRetriever retrieves relevant documents or context from a Needle collection
    based on a search query.

    Setup:
        Install the `needle-python` library and set your Needle API key.

        .. code-block:: bash

            pip install needle-python
            export NEEDLE_API_KEY="your-api-key"

    Key init args:
        - `needle_api_key` (Optional[str]): The API key for authenticating with Needle.
        - `collection_id` (str): The ID of the Needle collection to search in.
        - `client` (Optional[NeedleClient]): An optional instance of the NeedleClient.

    Usage:
        .. code-block:: python

            from langchain_community.retrievers.needle import NeedleRetriever

            retriever = NeedleRetriever(
                needle_api_key="your-api-key",
                collection_id="your-collection-id"
            )

            results = retriever.retrieve("example query")
            for doc in results:
                print(doc.page_content)
    """

    client: Optional[Any] = None
    """Optional instance of NeedleClient."""
    needle_api_key: Optional[str] = Field(None, description="Needle API Key")
    collection_id: Optional[str] = Field(
        ..., description="The ID of the Needle collection to search in"
    )

    def _initialize_client(self) -> None:
        """
        Initialize the NeedleClient with the provided API key.

        If a client instance is already provided, this method does nothing.

        Raises:
            ImportError: If the `needle-python` package is not installed.
        """
        try:
            from needle.v1 import NeedleClient
        except ImportError as e:
            # Chain the original ImportError so the real cause stays visible.
            raise ImportError(
                "Please install with `pip install needle-python`."
            ) from e
        if not self.client:
            self.client = NeedleClient(api_key=self.needle_api_key)

    def _search_collection(self, query: str) -> List[Document]:
        """
        Search the Needle collection for relevant documents.

        Args:
            query (str): The search query used to find relevant documents.

        Returns:
            List[Document]: A list of documents matching the search query.
        """
        self._initialize_client()
        if self.client is None:
            raise ValueError("NeedleClient is not initialized. Provide an API key.")
        results = self.client.collections.search(
            collection_id=self.collection_id, text=query
        )
        docs = [Document(page_content=result.content) for result in results]
        return docs

    def _get_relevant_documents(
        self, query: str, *, run_manager: CallbackManagerForRetrieverRun
    ) -> List[Document]:
        """
        Retrieve relevant documents based on the query.

        Args:
            query (str): The query string used to search the collection.

        Returns:
            List[Document]: A list of documents relevant to the query.
        """
        # The `run_manager` parameter is included to match the superclass signature,
        # but it is not used in this implementation.
        return self._search_collection(query)
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/retrievers/llama_index.py | from typing import Any, Dict, List, cast
from langchain_core.callbacks import CallbackManagerForRetrieverRun
from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever
from pydantic import Field
class LlamaIndexRetriever(BaseRetriever):
    """`LlamaIndex` retriever.

    Wraps a LlamaIndex data structure for question-answering with
    sources."""

    index: Any = None
    """LlamaIndex index to query."""
    query_kwargs: Dict = Field(default_factory=dict)
    """Keyword arguments to pass to the query method."""

    def _get_relevant_documents(
        self, query: str, *, run_manager: CallbackManagerForRetrieverRun
    ) -> List[Document]:
        """Get documents relevant for a query."""
        try:
            from llama_index.core.base.response.schema import Response
            from llama_index.core.indices.base import BaseGPTIndex
        except ImportError:
            raise ImportError(
                "You need to install `pip install llama-index` to use this retriever."
            )
        gpt_index = cast(BaseGPTIndex, self.index)
        response = cast(Response, gpt_index.query(query, **self.query_kwargs))
        # One Document per source node; missing metadata defaults to {}.
        return [
            Document(
                page_content=node.get_content(),
                metadata=node.metadata or {},
            )
            for node in response.source_nodes
        ]
class LlamaIndexGraphRetriever(BaseRetriever):
    """`LlamaIndex` graph data structure retriever.

    Wraps a LlamaIndex composable graph for question-answering with
    sources."""

    graph: Any = None
    """LlamaIndex graph to query."""
    query_configs: List[Dict] = Field(default_factory=list)
    """List of query configs to pass to the query method."""

    def _get_relevant_documents(
        self, query: str, *, run_manager: CallbackManagerForRetrieverRun
    ) -> List[Document]:
        """Get documents relevant for a query."""
        try:
            from llama_index.core.base.response.schema import Response
            from llama_index.core.composability.base import (
                QUERY_CONFIG_TYPE,
                ComposableGraph,
            )
        except ImportError:
            raise ImportError(
                "You need to install `pip install llama-index` to use this retriever."
            )
        composable_graph = cast(ComposableGraph, self.graph)
        # For now, force response_mode="no_text" so only sources come back.
        for config in self.query_configs:
            config["response_mode"] = "no_text"
        typed_configs = cast(List[QUERY_CONFIG_TYPE], self.query_configs)
        response = cast(
            Response, composable_graph.query(query, query_configs=typed_configs)
        )
        # One Document per source node; missing metadata defaults to {}.
        return [
            Document(
                page_content=node.get_content(),
                metadata=node.metadata or {},
            )
            for node in response.source_nodes
        ]
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/graph_vectorstores/cassandra.py | """Apache Cassandra DB graph vector store integration."""
from __future__ import annotations
import asyncio
import json
import logging
import secrets
from dataclasses import asdict, is_dataclass
from typing import (
TYPE_CHECKING,
Any,
AsyncIterable,
Iterable,
List,
Optional,
Sequence,
Tuple,
Type,
TypeVar,
cast,
)
from langchain_core._api import beta
from langchain_core.documents import Document
from typing_extensions import override
from langchain_community.graph_vectorstores.base import GraphVectorStore, Node
from langchain_community.graph_vectorstores.links import METADATA_LINKS_KEY, Link
from langchain_community.graph_vectorstores.mmr_helper import MmrHelper
from langchain_community.utilities.cassandra import SetupMode
from langchain_community.vectorstores.cassandra import Cassandra as CassandraVectorStore
CGVST = TypeVar("CGVST", bound="CassandraGraphVectorStore")
if TYPE_CHECKING:
from cassandra.cluster import Session
from langchain_core.embeddings import Embeddings
logger = logging.getLogger(__name__)
class AdjacentNode:
    """Pairs a graph node's identity and links with its embedding vector."""

    id: str
    links: list[Link]
    embedding: list[float]

    def __init__(self, node: Node, embedding: list[float]) -> None:
        """Create an Adjacent Node from a graph node and its embedding."""
        # A falsy node id (None or "") is normalized to the empty string.
        self.id = node.id if node.id else ""
        self.links = node.links
        self.embedding = embedding
def _serialize_links(links: list[Link]) -> str:
class SetAndLinkEncoder(json.JSONEncoder):
def default(self, obj: Any) -> Any: # noqa: ANN401
if not isinstance(obj, type) and is_dataclass(obj):
return asdict(obj)
if isinstance(obj, Iterable):
return list(obj)
# Let the base class default method raise the TypeError
return super().default(obj)
return json.dumps(links, cls=SetAndLinkEncoder)
def _deserialize_links(json_blob: str | None) -> set[Link]:
return {
Link(kind=link["kind"], direction=link["direction"], tag=link["tag"])
for link in cast(list[dict[str, Any]], json.loads(json_blob or "[]"))
}
def _metadata_link_key(link: Link) -> str:
return f"link:{link.kind}:{link.tag}"
def _metadata_link_value() -> str:
return "link"
def _doc_to_node(doc: Document) -> Node:
    """Convert a stored Document back into a graph Node.

    The serialized links in the document metadata are deserialized and
    exposed both through the node's ``links`` and its metadata entry.
    """
    node_metadata = doc.metadata.copy()
    restored_links = _deserialize_links(node_metadata.get(METADATA_LINKS_KEY))
    node_metadata[METADATA_LINKS_KEY] = restored_links
    return Node(
        id=doc.id,
        text=doc.page_content,
        metadata=node_metadata,
        links=list(restored_links),
    )
def _incoming_links(node: Node | AdjacentNode) -> set[Link]:
return {link for link in node.links if link.direction in ["in", "bidir"]}
def _outgoing_links(node: Node | AdjacentNode) -> set[Link]:
return {link for link in node.links if link.direction in ["out", "bidir"]}
@beta()
class CassandraGraphVectorStore(GraphVectorStore):
def __init__(
self,
embedding: Embeddings,
session: Session | None = None,
keyspace: str | None = None,
table_name: str = "",
ttl_seconds: int | None = None,
*,
body_index_options: list[tuple[str, Any]] | None = None,
setup_mode: SetupMode = SetupMode.SYNC,
metadata_deny_list: Optional[list[str]] = None,
) -> None:
"""Apache Cassandra(R) for graph-vector-store workloads.
To use it, you need a recent installation of the `cassio` library
and a Cassandra cluster / Astra DB instance supporting vector capabilities.
Example:
.. code-block:: python
from langchain_community.graph_vectorstores import
CassandraGraphVectorStore
from langchain_openai import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
session = ... # create your Cassandra session object
keyspace = 'my_keyspace' # the keyspace should exist already
table_name = 'my_graph_vector_store'
vectorstore = CassandraGraphVectorStore(
embeddings,
session,
keyspace,
table_name,
)
Args:
embedding: Embedding function to use.
session: Cassandra driver session. If not provided, it is resolved from
cassio.
keyspace: Cassandra keyspace. If not provided, it is resolved from cassio.
table_name: Cassandra table (required).
ttl_seconds: Optional time-to-live for the added texts.
body_index_options: Optional options used to create the body index.
Eg. body_index_options = [cassio.table.cql.STANDARD_ANALYZER]
setup_mode: mode used to create the Cassandra table (SYNC,
ASYNC or OFF).
metadata_deny_list: Optional list of metadata keys to not index.
i.e. to fine-tune which of the metadata fields are indexed.
Note: if you plan to have massive unique text metadata entries,
consider not indexing them for performance
(and to overcome max-length limitations).
Note: the `metadata_indexing` parameter from
langchain_community.utilities.cassandra.Cassandra is not
exposed since CassandraGraphVectorStore only supports the
deny_list option.
"""
self.embedding = embedding
if metadata_deny_list is None:
metadata_deny_list = []
metadata_deny_list.append(METADATA_LINKS_KEY)
self.vector_store = CassandraVectorStore(
embedding=embedding,
session=session,
keyspace=keyspace,
table_name=table_name,
ttl_seconds=ttl_seconds,
body_index_options=body_index_options,
setup_mode=setup_mode,
metadata_indexing=("deny_list", metadata_deny_list),
)
store_session: Session = self.vector_store.session
self._insert_node = store_session.prepare(
f"""
INSERT INTO {keyspace}.{table_name} (
row_id, body_blob, vector, attributes_blob, metadata_s
) VALUES (?, ?, ?, ?, ?)
""" # noqa: S608
)
@property
@override
def embeddings(self) -> Embeddings | None:
return self.embedding
def _get_metadata_filter(
self,
metadata: dict[str, Any] | None = None,
outgoing_link: Link | None = None,
) -> dict[str, Any]:
if outgoing_link is None:
return metadata or {}
metadata_filter = {} if metadata is None else metadata.copy()
metadata_filter[_metadata_link_key(link=outgoing_link)] = _metadata_link_value()
return metadata_filter
def _restore_links(self, doc: Document) -> Document:
"""Restores the links in the document by deserializing them from metadata.
Args:
doc: A single Document
Returns:
The same Document with restored links.
"""
links = _deserialize_links(doc.metadata.get(METADATA_LINKS_KEY))
doc.metadata[METADATA_LINKS_KEY] = links
# TODO: Could this be skipped if we put these metadata entries
# only in the searchable `metadata_s` column?
for incoming_link_key in [
_metadata_link_key(link=link)
for link in links
if link.direction in ["in", "bidir"]
]:
if incoming_link_key in doc.metadata:
del doc.metadata[incoming_link_key]
return doc
def _get_node_metadata_for_insertion(self, node: Node) -> dict[str, Any]:
metadata = node.metadata.copy()
metadata[METADATA_LINKS_KEY] = _serialize_links(node.links)
# TODO: Could we could put these metadata entries
# only in the searchable `metadata_s` column?
for incoming_link in _incoming_links(node=node):
metadata[_metadata_link_key(link=incoming_link)] = _metadata_link_value()
return metadata
def _get_docs_for_insertion(
self, nodes: Iterable[Node]
) -> tuple[list[Document], list[str]]:
docs = []
ids = []
for node in nodes:
node_id = secrets.token_hex(8) if not node.id else node.id
doc = Document(
page_content=node.text,
metadata=self._get_node_metadata_for_insertion(node=node),
id=node_id,
)
docs.append(doc)
ids.append(node_id)
return (docs, ids)
@override
def add_nodes(
self,
nodes: Iterable[Node],
**kwargs: Any,
) -> Iterable[str]:
"""Add nodes to the graph store.
Args:
nodes: the nodes to add.
**kwargs: Additional keyword arguments.
"""
(docs, ids) = self._get_docs_for_insertion(nodes=nodes)
return self.vector_store.add_documents(docs, ids=ids)
@override
async def aadd_nodes(
self,
nodes: Iterable[Node],
**kwargs: Any,
) -> AsyncIterable[str]:
"""Add nodes to the graph store.
Args:
nodes: the nodes to add.
**kwargs: Additional keyword arguments.
"""
(docs, ids) = self._get_docs_for_insertion(nodes=nodes)
for inserted_id in await self.vector_store.aadd_documents(docs, ids=ids):
yield inserted_id
@override
def similarity_search(
self,
query: str,
k: int = 4,
filter: dict[str, Any] | None = None,
**kwargs: Any,
) -> list[Document]:
"""Retrieve documents from this graph store.
Args:
query: The query string.
k: The number of Documents to return. Defaults to 4.
filter: Optional metadata to filter the results.
**kwargs: Additional keyword arguments.
Returns:
Collection of retrieved documents.
"""
return [
self._restore_links(doc)
for doc in self.vector_store.similarity_search(
query=query,
k=k,
filter=filter,
**kwargs,
)
]
@override
async def asimilarity_search(
self,
query: str,
k: int = 4,
filter: dict[str, Any] | None = None,
**kwargs: Any,
) -> list[Document]:
"""Retrieve documents from this graph store.
Args:
query: The query string.
k: The number of Documents to return. Defaults to 4.
filter: Optional metadata to filter the results.
**kwargs: Additional keyword arguments.
Returns:
Collection of retrieved documents.
"""
return [
self._restore_links(doc)
for doc in await self.vector_store.asimilarity_search(
query=query,
k=k,
filter=filter,
**kwargs,
)
]
@override
def similarity_search_by_vector(
self,
embedding: list[float],
k: int = 4,
filter: dict[str, Any] | None = None,
**kwargs: Any,
) -> list[Document]:
"""Return docs most similar to embedding vector.
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter: Filter on the metadata to apply.
**kwargs: Additional arguments are ignored.
Returns:
The list of Documents most similar to the query vector.
"""
return [
self._restore_links(doc)
for doc in self.vector_store.similarity_search_by_vector(
embedding,
k=k,
filter=filter,
**kwargs,
)
]
@override
async def asimilarity_search_by_vector(
self,
embedding: list[float],
k: int = 4,
filter: dict[str, Any] | None = None,
**kwargs: Any,
) -> list[Document]:
"""Return docs most similar to embedding vector.
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter: Filter on the metadata to apply.
**kwargs: Additional arguments are ignored.
Returns:
The list of Documents most similar to the query vector.
"""
return [
self._restore_links(doc)
for doc in await self.vector_store.asimilarity_search_by_vector(
embedding,
k=k,
filter=filter,
**kwargs,
)
]
def metadata_search(
self,
filter: dict[str, Any] | None = None, # noqa: A002
n: int = 5,
) -> Iterable[Document]:
"""Get documents via a metadata search.
Args:
filter: the metadata to query for.
n: the maximum number of documents to return.
"""
return [
self._restore_links(doc)
for doc in self.vector_store.metadata_search(
filter=filter or {},
n=n,
)
]
async def ametadata_search(
self,
filter: dict[str, Any] | None = None, # noqa: A002
n: int = 5,
) -> Iterable[Document]:
"""Get documents via a metadata search.
Args:
filter: the metadata to query for.
n: the maximum number of documents to return.
"""
return [
self._restore_links(doc)
for doc in await self.vector_store.ametadata_search(
filter=filter or {},
n=n,
)
]
def get_by_document_id(self, document_id: str) -> Document | None:
"""Retrieve a single document from the store, given its document ID.
Args:
document_id: The document ID
Returns:
The the document if it exists. Otherwise None.
"""
doc = self.vector_store.get_by_document_id(document_id=document_id)
return self._restore_links(doc) if doc is not None else None
async def aget_by_document_id(self, document_id: str) -> Document | None:
"""Retrieve a single document from the store, given its document ID.
Args:
document_id: The document ID
Returns:
The the document if it exists. Otherwise None.
"""
doc = await self.vector_store.aget_by_document_id(document_id=document_id)
return self._restore_links(doc) if doc is not None else None
def get_node(self, node_id: str) -> Node | None:
"""Retrieve a single node from the store, given its ID.
Args:
node_id: The node ID
Returns:
The the node if it exists. Otherwise None.
"""
doc = self.vector_store.get_by_document_id(document_id=node_id)
if doc is None:
return None
return _doc_to_node(doc=doc)
@override
async def ammr_traversal_search( # noqa: C901
self,
query: str,
*,
initial_roots: Sequence[str] = (),
k: int = 4,
depth: int = 2,
fetch_k: int = 100,
adjacent_k: int = 10,
lambda_mult: float = 0.5,
score_threshold: float = float("-inf"),
filter: dict[str, Any] | None = None,
**kwargs: Any,
) -> AsyncIterable[Document]:
"""Retrieve documents from this graph store using MMR-traversal.
This strategy first retrieves the top `fetch_k` results by similarity to
the question. It then selects the top `k` results based on
maximum-marginal relevance using the given `lambda_mult`.
At each step, it considers the (remaining) documents from `fetch_k` as
well as any documents connected by edges to a selected document
retrieved based on similarity (a "root").
Args:
query: The query string to search for.
initial_roots: Optional list of document IDs to use for initializing search.
The top `adjacent_k` nodes adjacent to each initial root will be
included in the set of initial candidates. To fetch only in the
neighborhood of these nodes, set `fetch_k = 0`.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of initial Documents to fetch via similarity.
Will be added to the nodes adjacent to `initial_roots`.
Defaults to 100.
adjacent_k: Number of adjacent Documents to fetch.
Defaults to 10.
depth: Maximum depth of a node (number of edges) from a node
retrieved via similarity. Defaults to 2.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding to maximum
diversity and 1 to minimum diversity. Defaults to 0.5.
score_threshold: Only documents with a score greater than or equal
this threshold will be chosen. Defaults to -infinity.
filter: Optional metadata to filter the results.
**kwargs: Additional keyword arguments.
"""
query_embedding = self.embedding.embed_query(query)
helper = MmrHelper(
k=k,
query_embedding=query_embedding,
lambda_mult=lambda_mult,
score_threshold=score_threshold,
)
# For each unselected node, stores the outgoing links.
outgoing_links_map: dict[str, set[Link]] = {}
visited_links: set[Link] = set()
# Map from id to Document
retrieved_docs: dict[str, Document] = {}
async def fetch_neighborhood(neighborhood: Sequence[str]) -> None:
nonlocal outgoing_links_map, visited_links, retrieved_docs
# Put the neighborhood into the outgoing links, to avoid adding it
# to the candidate set in the future.
outgoing_links_map.update(
{content_id: set() for content_id in neighborhood}
)
# Initialize the visited_links with the set of outgoing links from the
# neighborhood. This prevents re-visiting them.
visited_links = await self._get_outgoing_links(neighborhood)
# Call `self._get_adjacent` to fetch the candidates.
adjacent_nodes = await self._get_adjacent(
links=visited_links,
query_embedding=query_embedding,
k_per_link=adjacent_k,
filter=filter,
retrieved_docs=retrieved_docs,
)
new_candidates: dict[str, list[float]] = {}
for adjacent_node in adjacent_nodes:
if adjacent_node.id not in outgoing_links_map:
outgoing_links_map[adjacent_node.id] = _outgoing_links(
node=adjacent_node
)
new_candidates[adjacent_node.id] = adjacent_node.embedding
helper.add_candidates(new_candidates)
async def fetch_initial_candidates() -> None:
nonlocal outgoing_links_map, visited_links, retrieved_docs
results = (
await self.vector_store.asimilarity_search_with_embedding_id_by_vector(
embedding=query_embedding,
k=fetch_k,
filter=filter,
)
)
candidates: dict[str, list[float]] = {}
for doc, embedding, doc_id in results:
if doc_id not in retrieved_docs:
retrieved_docs[doc_id] = doc
if doc_id not in outgoing_links_map:
node = _doc_to_node(doc)
outgoing_links_map[doc_id] = _outgoing_links(node=node)
candidates[doc_id] = embedding
helper.add_candidates(candidates)
if initial_roots:
await fetch_neighborhood(initial_roots)
if fetch_k > 0:
await fetch_initial_candidates()
# Tracks the depth of each candidate.
depths = {candidate_id: 0 for candidate_id in helper.candidate_ids()}
# Select the best item, K times.
for _ in range(k):
selected_id = helper.pop_best()
if selected_id is None:
break
next_depth = depths[selected_id] + 1
if next_depth < depth:
# If the next nodes would not exceed the depth limit, find the
# adjacent nodes.
# Find the links linked to from the selected ID.
selected_outgoing_links = outgoing_links_map.pop(selected_id)
# Don't re-visit already visited links.
selected_outgoing_links.difference_update(visited_links)
# Find the nodes with incoming links from those links.
adjacent_nodes = await self._get_adjacent(
links=selected_outgoing_links,
query_embedding=query_embedding,
k_per_link=adjacent_k,
filter=filter,
retrieved_docs=retrieved_docs,
)
# Record the selected_outgoing_links as visited.
visited_links.update(selected_outgoing_links)
new_candidates = {}
for adjacent_node in adjacent_nodes:
if adjacent_node.id not in outgoing_links_map:
outgoing_links_map[adjacent_node.id] = _outgoing_links(
node=adjacent_node
)
new_candidates[adjacent_node.id] = adjacent_node.embedding
if next_depth < depths.get(adjacent_node.id, depth + 1):
# If this is a new shortest depth, or there was no
# previous depth, update the depths. This ensures that
# when we discover a node we will have the shortest
# depth available.
#
# NOTE: No effort is made to traverse from nodes that
# were previously selected if they become reachable via
# a shorter path via nodes selected later. This is
# currently "intended", but may be worth experimenting
# with.
depths[adjacent_node.id] = next_depth
helper.add_candidates(new_candidates)
for doc_id, similarity_score, mmr_score in zip(
helper.selected_ids,
helper.selected_similarity_scores,
helper.selected_mmr_scores,
):
if doc_id in retrieved_docs:
doc = self._restore_links(retrieved_docs[doc_id])
doc.metadata["similarity_score"] = similarity_score
doc.metadata["mmr_score"] = mmr_score
yield doc
else:
msg = f"retrieved_docs should contain id: {doc_id}"
raise RuntimeError(msg)
@override
def mmr_traversal_search(
self,
query: str,
*,
initial_roots: Sequence[str] = (),
k: int = 4,
depth: int = 2,
fetch_k: int = 100,
adjacent_k: int = 10,
lambda_mult: float = 0.5,
score_threshold: float = float("-inf"),
filter: dict[str, Any] | None = None,
**kwargs: Any,
) -> Iterable[Document]:
"""Retrieve documents from this graph store using MMR-traversal.
This strategy first retrieves the top `fetch_k` results by similarity to
the question. It then selects the top `k` results based on
maximum-marginal relevance using the given `lambda_mult`.
At each step, it considers the (remaining) documents from `fetch_k` as
well as any documents connected by edges to a selected document
retrieved based on similarity (a "root").
Args:
query: The query string to search for.
initial_roots: Optional list of document IDs to use for initializing search.
The top `adjacent_k` nodes adjacent to each initial root will be
included in the set of initial candidates. To fetch only in the
neighborhood of these nodes, set `fetch_k = 0`.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of initial Documents to fetch via similarity.
Will be added to the nodes adjacent to `initial_roots`.
Defaults to 100.
adjacent_k: Number of adjacent Documents to fetch.
Defaults to 10.
depth: Maximum depth of a node (number of edges) from a node
retrieved via similarity. Defaults to 2.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding to maximum
diversity and 1 to minimum diversity. Defaults to 0.5.
score_threshold: Only documents with a score greater than or equal
this threshold will be chosen. Defaults to -infinity.
filter: Optional metadata to filter the results.
**kwargs: Additional keyword arguments.
"""
async def collect_docs() -> Iterable[Document]:
async_iter = self.ammr_traversal_search(
query=query,
initial_roots=initial_roots,
k=k,
depth=depth,
fetch_k=fetch_k,
adjacent_k=adjacent_k,
lambda_mult=lambda_mult,
score_threshold=score_threshold,
filter=filter,
**kwargs,
)
return [doc async for doc in async_iter]
return asyncio.run(collect_docs())
@override
async def atraversal_search( # noqa: C901
self,
query: str,
*,
k: int = 4,
depth: int = 1,
filter: dict[str, Any] | None = None,
**kwargs: Any,
) -> AsyncIterable[Document]:
"""Retrieve documents from this knowledge store.
First, `k` nodes are retrieved using a vector search for the `query` string.
Then, additional nodes are discovered up to the given `depth` from those
starting nodes.
Args:
query: The query string.
k: The number of Documents to return from the initial vector search.
Defaults to 4.
depth: The maximum depth of edges to traverse. Defaults to 1.
filter: Optional metadata to filter the results.
**kwargs: Additional keyword arguments.
Returns:
Collection of retrieved documents.
"""
# Depth 0:
# Query for `k` nodes similar to the question.
# Retrieve `content_id` and `outgoing_links()`.
#
# Depth 1:
# Query for nodes that have an incoming link in the `outgoing_links()` set.
# Combine node IDs.
# Query for `outgoing_links()` of those "new" node IDs.
#
# ...
# Map from visited ID to depth
visited_ids: dict[str, int] = {}
# Map from visited link to depth
visited_links: dict[Link, int] = {}
# Map from id to Document
retrieved_docs: dict[str, Document] = {}
async def visit_nodes(d: int, docs: Iterable[Document]) -> None:
"""Recursively visit nodes and their outgoing links."""
nonlocal visited_ids, visited_links, retrieved_docs
# Iterate over nodes, tracking the *new* outgoing links for this
# depth. These are links that are either new, or newly discovered at a
# lower depth.
outgoing_links: set[Link] = set()
for doc in docs:
if doc.id is not None:
if doc.id not in retrieved_docs:
retrieved_docs[doc.id] = doc
# If this node is at a closer depth, update visited_ids
if d <= visited_ids.get(doc.id, depth):
visited_ids[doc.id] = d
# If we can continue traversing from this node,
if d < depth:
node = _doc_to_node(doc=doc)
# Record any new (or newly discovered at a lower depth)
# links to the set to traverse.
for link in _outgoing_links(node=node):
if d <= visited_links.get(link, depth):
# Record that we'll query this link at the
# given depth, so we don't fetch it again
# (unless we find it an earlier depth)
visited_links[link] = d
outgoing_links.add(link)
if outgoing_links:
metadata_search_tasks = []
for outgoing_link in outgoing_links:
metadata_filter = self._get_metadata_filter(
metadata=filter,
outgoing_link=outgoing_link,
)
metadata_search_tasks.append(
asyncio.create_task(
self.vector_store.ametadata_search(
filter=metadata_filter, n=1000
)
)
)
results = await asyncio.gather(*metadata_search_tasks)
# Visit targets concurrently
visit_target_tasks = [
visit_targets(d=d + 1, docs=docs) for docs in results
]
await asyncio.gather(*visit_target_tasks)
async def visit_targets(d: int, docs: Iterable[Document]) -> None:
"""Visit target nodes retrieved from outgoing links."""
nonlocal visited_ids, retrieved_docs
new_ids_at_next_depth = set()
for doc in docs:
if doc.id is not None:
if doc.id not in retrieved_docs:
retrieved_docs[doc.id] = doc
if d <= visited_ids.get(doc.id, depth):
new_ids_at_next_depth.add(doc.id)
if new_ids_at_next_depth:
visit_node_tasks = [
visit_nodes(d=d, docs=[retrieved_docs[doc_id]])
for doc_id in new_ids_at_next_depth
if doc_id in retrieved_docs
]
fetch_tasks = [
asyncio.create_task(
self.vector_store.aget_by_document_id(document_id=doc_id)
)
for doc_id in new_ids_at_next_depth
if doc_id not in retrieved_docs
]
new_docs: list[Document | None] = await asyncio.gather(*fetch_tasks)
visit_node_tasks.extend(
visit_nodes(d=d, docs=[new_doc])
for new_doc in new_docs
if new_doc is not None
)
await asyncio.gather(*visit_node_tasks)
# Start the traversal
initial_docs = self.vector_store.similarity_search(
query=query,
k=k,
filter=filter,
)
await visit_nodes(d=0, docs=initial_docs)
for doc_id in visited_ids:
if doc_id in retrieved_docs:
yield self._restore_links(retrieved_docs[doc_id])
else:
msg = f"retrieved_docs should contain id: {doc_id}"
raise RuntimeError(msg)
@override
def traversal_search(
self,
query: str,
*,
k: int = 4,
depth: int = 1,
filter: dict[str, Any] | None = None,
**kwargs: Any,
) -> Iterable[Document]:
"""Retrieve documents from this knowledge store.
First, `k` nodes are retrieved using a vector search for the `query` string.
Then, additional nodes are discovered up to the given `depth` from those
starting nodes.
Args:
query: The query string.
k: The number of Documents to return from the initial vector search.
Defaults to 4.
depth: The maximum depth of edges to traverse. Defaults to 1.
filter: Optional metadata to filter the results.
**kwargs: Additional keyword arguments.
Returns:
Collection of retrieved documents.
"""
async def collect_docs() -> Iterable[Document]:
async_iter = self.atraversal_search(
query=query,
k=k,
depth=depth,
filter=filter,
**kwargs,
)
return [doc async for doc in async_iter]
return asyncio.run(collect_docs())
async def _get_outgoing_links(self, source_ids: Iterable[str]) -> set[Link]:
"""Return the set of outgoing links for the given source IDs asynchronously.
Args:
source_ids: The IDs of the source nodes to retrieve outgoing links for.
Returns:
A set of `Link` objects representing the outgoing links from the source
nodes.
"""
links = set()
# Create coroutine objects without scheduling them yet
coroutines = [
self.vector_store.aget_by_document_id(document_id=source_id)
for source_id in source_ids
]
# Schedule and await all coroutines
docs = await asyncio.gather(*coroutines)
for doc in docs:
if doc is not None:
node = _doc_to_node(doc=doc)
links.update(_outgoing_links(node=node))
return links
async def _get_adjacent(
self,
links: set[Link],
query_embedding: list[float],
retrieved_docs: dict[str, Document],
k_per_link: int | None = None,
filter: dict[str, Any] | None = None, # noqa: A002
) -> Iterable[AdjacentNode]:
"""Return the target nodes with incoming links from any of the given links.
Args:
links: The links to look for.
query_embedding: The query embedding. Used to rank target nodes.
retrieved_docs: A cache of retrieved docs. This will be added to.
k_per_link: The number of target nodes to fetch for each link.
filter: Optional metadata to filter the results.
Returns:
Iterable of adjacent edges.
"""
targets: dict[str, AdjacentNode] = {}
tasks = []
for link in links:
metadata_filter = self._get_metadata_filter(
metadata=filter,
outgoing_link=link,
)
tasks.append(
self.vector_store.asimilarity_search_with_embedding_id_by_vector(
embedding=query_embedding,
k=k_per_link or 10,
filter=metadata_filter,
)
)
results = await asyncio.gather(*tasks)
for result in results:
for doc, embedding, doc_id in result:
if doc_id not in retrieved_docs:
retrieved_docs[doc_id] = doc
if doc_id not in targets:
node = _doc_to_node(doc=doc)
targets[doc_id] = AdjacentNode(node=node, embedding=embedding)
# TODO: Consider a combined limit based on the similarity and/or
# predicated MMR score?
return targets.values()
@staticmethod
def _build_docs_from_texts(
texts: List[str],
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
) -> List[Document]:
docs: List[Document] = []
for i, text in enumerate(texts):
doc = Document(
page_content=text,
)
if metadatas is not None:
doc.metadata = metadatas[i]
if ids is not None:
doc.id = ids[i]
docs.append(doc)
return docs
@classmethod
def from_texts(
cls: Type[CGVST],
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
*,
session: Optional[Session] = None,
keyspace: Optional[str] = None,
table_name: str = "",
ids: Optional[List[str]] = None,
ttl_seconds: Optional[int] = None,
body_index_options: Optional[List[Tuple[str, Any]]] = None,
metadata_deny_list: Optional[list[str]] = None,
**kwargs: Any,
) -> CGVST:
"""Create a CassandraGraphVectorStore from raw texts.
Args:
texts: Texts to add to the vectorstore.
embedding: Embedding function to use.
metadatas: Optional list of metadatas associated with the texts.
session: Cassandra driver session.
If not provided, it is resolved from cassio.
keyspace: Cassandra key space.
If not provided, it is resolved from cassio.
table_name: Cassandra table (required).
ids: Optional list of IDs associated with the texts.
ttl_seconds: Optional time-to-live for the added texts.
body_index_options: Optional options used to create the body index.
Eg. body_index_options = [cassio.table.cql.STANDARD_ANALYZER]
metadata_deny_list: Optional list of metadata keys to not index.
i.e. to fine-tune which of the metadata fields are indexed.
Note: if you plan to have massive unique text metadata entries,
consider not indexing them for performance
(and to overcome max-length limitations).
Note: the `metadata_indexing` parameter from
langchain_community.utilities.cassandra.Cassandra is not
exposed since CassandraGraphVectorStore only supports the
deny_list option.
Returns:
a CassandraGraphVectorStore.
"""
docs = cls._build_docs_from_texts(
texts=texts,
metadatas=metadatas,
ids=ids,
)
return cls.from_documents(
documents=docs,
embedding=embedding,
session=session,
keyspace=keyspace,
table_name=table_name,
ttl_seconds=ttl_seconds,
body_index_options=body_index_options,
metadata_deny_list=metadata_deny_list,
**kwargs,
)
@classmethod
async def afrom_texts(
cls: Type[CGVST],
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
*,
session: Optional[Session] = None,
keyspace: Optional[str] = None,
table_name: str = "",
ids: Optional[List[str]] = None,
ttl_seconds: Optional[int] = None,
body_index_options: Optional[List[Tuple[str, Any]]] = None,
metadata_deny_list: Optional[list[str]] = None,
**kwargs: Any,
) -> CGVST:
"""Create a CassandraGraphVectorStore from raw texts.
Args:
texts: Texts to add to the vectorstore.
embedding: Embedding function to use.
metadatas: Optional list of metadatas associated with the texts.
session: Cassandra driver session.
If not provided, it is resolved from cassio.
keyspace: Cassandra key space.
If not provided, it is resolved from cassio.
table_name: Cassandra table (required).
ids: Optional list of IDs associated with the texts.
ttl_seconds: Optional time-to-live for the added texts.
body_index_options: Optional options used to create the body index.
Eg. body_index_options = [cassio.table.cql.STANDARD_ANALYZER]
metadata_deny_list: Optional list of metadata keys to not index.
i.e. to fine-tune which of the metadata fields are indexed.
Note: if you plan to have massive unique text metadata entries,
consider not indexing them for performance
(and to overcome max-length limitations).
Note: the `metadata_indexing` parameter from
langchain_community.utilities.cassandra.Cassandra is not
exposed since CassandraGraphVectorStore only supports the
deny_list option.
Returns:
a CassandraGraphVectorStore.
"""
docs = cls._build_docs_from_texts(
texts=texts,
metadatas=metadatas,
ids=ids,
)
return await cls.afrom_documents(
documents=docs,
embedding=embedding,
session=session,
keyspace=keyspace,
table_name=table_name,
ttl_seconds=ttl_seconds,
body_index_options=body_index_options,
metadata_deny_list=metadata_deny_list,
**kwargs,
)
@staticmethod
def _add_ids_to_docs(
docs: List[Document],
ids: Optional[List[str]] = None,
) -> List[Document]:
if ids is not None:
for doc, doc_id in zip(docs, ids):
doc.id = doc_id
return docs
@classmethod
def from_documents(
cls: Type[CGVST],
documents: List[Document],
embedding: Embeddings,
*,
session: Optional[Session] = None,
keyspace: Optional[str] = None,
table_name: str = "",
ids: Optional[List[str]] = None,
ttl_seconds: Optional[int] = None,
body_index_options: Optional[List[Tuple[str, Any]]] = None,
metadata_deny_list: Optional[list[str]] = None,
**kwargs: Any,
) -> CGVST:
"""Create a CassandraGraphVectorStore from a document list.
Args:
documents: Documents to add to the vectorstore.
embedding: Embedding function to use.
session: Cassandra driver session.
If not provided, it is resolved from cassio.
keyspace: Cassandra key space.
If not provided, it is resolved from cassio.
table_name: Cassandra table (required).
ids: Optional list of IDs associated with the documents.
ttl_seconds: Optional time-to-live for the added documents.
body_index_options: Optional options used to create the body index.
Eg. body_index_options = [cassio.table.cql.STANDARD_ANALYZER]
metadata_deny_list: Optional list of metadata keys to not index.
i.e. to fine-tune which of the metadata fields are indexed.
Note: if you plan to have massive unique text metadata entries,
consider not indexing them for performance
(and to overcome max-length limitations).
Note: the `metadata_indexing` parameter from
langchain_community.utilities.cassandra.Cassandra is not
exposed since CassandraGraphVectorStore only supports the
deny_list option.
Returns:
a CassandraGraphVectorStore.
"""
store = cls(
embedding=embedding,
session=session,
keyspace=keyspace,
table_name=table_name,
ttl_seconds=ttl_seconds,
body_index_options=body_index_options,
metadata_deny_list=metadata_deny_list,
**kwargs,
)
store.add_documents(documents=cls._add_ids_to_docs(docs=documents, ids=ids))
return store
@classmethod
async def afrom_documents(
cls: Type[CGVST],
documents: List[Document],
embedding: Embeddings,
*,
session: Optional[Session] = None,
keyspace: Optional[str] = None,
table_name: str = "",
ids: Optional[List[str]] = None,
ttl_seconds: Optional[int] = None,
body_index_options: Optional[List[Tuple[str, Any]]] = None,
metadata_deny_list: Optional[list[str]] = None,
**kwargs: Any,
) -> CGVST:
"""Create a CassandraGraphVectorStore from a document list.
Args:
documents: Documents to add to the vectorstore.
embedding: Embedding function to use.
session: Cassandra driver session.
If not provided, it is resolved from cassio.
keyspace: Cassandra key space.
If not provided, it is resolved from cassio.
table_name: Cassandra table (required).
ids: Optional list of IDs associated with the documents.
ttl_seconds: Optional time-to-live for the added documents.
body_index_options: Optional options used to create the body index.
Eg. body_index_options = [cassio.table.cql.STANDARD_ANALYZER]
metadata_deny_list: Optional list of metadata keys to not index.
i.e. to fine-tune which of the metadata fields are indexed.
Note: if you plan to have massive unique text metadata entries,
consider not indexing them for performance
(and to overcome max-length limitations).
Note: the `metadata_indexing` parameter from
langchain_community.utilities.cassandra.Cassandra is not
exposed since CassandraGraphVectorStore only supports the
deny_list option.
Returns:
a CassandraGraphVectorStore.
"""
store = cls(
embedding=embedding,
session=session,
keyspace=keyspace,
table_name=table_name,
ttl_seconds=ttl_seconds,
setup_mode=SetupMode.ASYNC,
body_index_options=body_index_options,
metadata_deny_list=metadata_deny_list,
**kwargs,
)
await store.aadd_documents(
documents=cls._add_ids_to_docs(docs=documents, ids=ids)
)
return store
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/graph_vectorstores/base.py | from __future__ import annotations
import logging
from abc import abstractmethod
from collections.abc import AsyncIterable, Collection, Iterable, Iterator
from typing import (
Any,
ClassVar,
Optional,
Sequence,
cast,
)
from langchain_core._api import beta
from langchain_core.callbacks import (
AsyncCallbackManagerForRetrieverRun,
CallbackManagerForRetrieverRun,
)
from langchain_core.documents import Document
from langchain_core.load import Serializable
from langchain_core.runnables import run_in_executor
from langchain_core.vectorstores import VectorStore, VectorStoreRetriever
from pydantic import Field
from langchain_community.graph_vectorstores.links import METADATA_LINKS_KEY, Link
logger = logging.getLogger(__name__)
def _has_next(iterator: Iterator) -> bool:
"""Checks if the iterator has more elements.
Warning: consumes an element from the iterator"""
sentinel = object()
return next(iterator, sentinel) is not sentinel
@beta()
class Node(Serializable):
    """Node in the GraphVectorStore.
    Edges exist from nodes with an outgoing link to nodes with a matching incoming link.
    For instance two nodes `a` and `b` connected over a hyperlink ``https://some-url``
    would look like:
    .. code-block:: python
        [
            Node(
                id="a",
                text="some text a",
                links= [
                    Link(kind="hyperlink", tag="https://some-url", direction="incoming")
                ],
            ),
            Node(
                id="b",
                text="some text b",
                links= [
                    Link(kind="hyperlink", tag="https://some-url", direction="outgoing")
                ],
            )
        ]
    """
    # NOTE: when a Node round-trips through a Document (see _documents_to_nodes
    # and nodes_to_documents in this module), `links` is stored inside the
    # document metadata under METADATA_LINKS_KEY.
    id: Optional[str] = None
    """Unique ID for the node. Will be generated by the GraphVectorStore if not set."""
    text: str
    """Text contained by the node."""
    metadata: dict = Field(default_factory=dict)
    """Metadata for the node."""
    links: list[Link] = Field(default_factory=list)
    """Links associated with the node."""
def _texts_to_nodes(
    texts: Iterable[str],
    metadatas: Optional[Iterable[dict]],
    ids: Optional[Iterable[str]],
) -> Iterator[Node]:
    """Zip texts with optional metadatas and ids into ``Node`` objects.

    Raises:
        ValueError: if ``metadatas`` or ``ids`` is provided but its length
            does not match ``texts``.
    """
    metadata_iter = iter(metadatas) if metadatas else None
    id_iter = iter(ids) if ids else None
    for content in texts:
        if metadata_iter is None:
            node_metadata = {}
        else:
            try:
                # Copy so popping the links key does not mutate caller data.
                node_metadata = next(metadata_iter).copy()
            except StopIteration as e:
                raise ValueError("texts iterable longer than metadatas") from e
        node_id = None
        if id_iter is not None:
            try:
                node_id = next(id_iter)
            except StopIteration as e:
                raise ValueError("texts iterable longer than ids") from e
        links = node_metadata.pop(METADATA_LINKS_KEY, [])
        if not isinstance(links, list):
            links = list(links)
        yield Node(
            id=node_id,
            metadata=node_metadata,
            text=content,
            links=links,
        )
    # Leftover entries mean the caller supplied too many ids/metadatas.
    if id_iter and _has_next(id_iter):
        raise ValueError("ids iterable longer than texts")
    if metadata_iter and _has_next(metadata_iter):
        raise ValueError("metadatas iterable longer than texts")
def _documents_to_nodes(documents: Iterable[Document]) -> Iterator[Node]:
    """Yield one ``Node`` per document, lifting links out of the metadata."""
    for document in documents:
        # Copy so popping the links key does not mutate the original document.
        node_metadata = dict(document.metadata)
        raw_links = node_metadata.pop(METADATA_LINKS_KEY, [])
        yield Node(
            id=document.id,
            metadata=node_metadata,
            text=document.page_content,
            links=raw_links if isinstance(raw_links, list) else list(raw_links),
        )
@beta()
def nodes_to_documents(nodes: Iterable[Node]) -> Iterator[Document]:
    """Convert nodes back into documents.

    Each node's links are serialized into the document metadata under
    ``METADATA_LINKS_KEY``; all other metadata is carried over unchanged.

    Args:
        nodes: The nodes to convert to documents.

    Returns:
        The documents generated from the nodes.
    """
    for node in nodes:
        doc_metadata = {
            **node.metadata,
            # Convert the core `Link` (from the node) back to the local `Link`.
            METADATA_LINKS_KEY: [
                Link(kind=link.kind, direction=link.direction, tag=link.tag)
                for link in node.links
            ],
        }
        yield Document(
            id=node.id,
            page_content=node.text,
            metadata=doc_metadata,
        )
@beta(message="Added in version 0.3.1 of langchain_community. API subject to change.")
class GraphVectorStore(VectorStore):
"""A hybrid vector-and-graph graph store.
Document chunks support vector-similarity search as well as edges linking
chunks based on structural and semantic properties.
.. versionadded:: 0.3.1
"""
@abstractmethod
def add_nodes(
self,
nodes: Iterable[Node],
**kwargs: Any,
) -> Iterable[str]:
"""Add nodes to the graph store.
Args:
nodes: the nodes to add.
**kwargs: Additional keyword arguments.
"""
async def aadd_nodes(
self,
nodes: Iterable[Node],
**kwargs: Any,
) -> AsyncIterable[str]:
"""Add nodes to the graph store.
Args:
nodes: the nodes to add.
**kwargs: Additional keyword arguments.
"""
iterator = iter(await run_in_executor(None, self.add_nodes, nodes, **kwargs))
done = object()
while True:
doc = await run_in_executor(None, next, iterator, done)
if doc is done:
break
yield doc # type: ignore[misc]
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[Iterable[dict]] = None,
*,
ids: Optional[Iterable[str]] = None,
**kwargs: Any,
) -> list[str]:
"""Run more texts through the embeddings and add to the vector store.
The Links present in the metadata field `links` will be extracted to create
the `Node` links.
Eg if nodes `a` and `b` are connected over a hyperlink `https://some-url`, the
function call would look like:
.. code-block:: python
store.add_texts(
ids=["a", "b"],
texts=["some text a", "some text b"],
metadatas=[
{
"links": [
Link.incoming(kind="hyperlink", tag="https://some-url")
]
},
{
"links": [
Link.outgoing(kind="hyperlink", tag="https://some-url")
]
},
],
)
Args:
texts: Iterable of strings to add to the vector store.
metadatas: Optional list of metadatas associated with the texts.
The metadata key `links` shall be an iterable of
:py:class:`~langchain_community.graph_vectorstores.links.Link`.
ids: Optional list of IDs associated with the texts.
**kwargs: vector store specific parameters.
Returns:
List of ids from adding the texts into the vector store.
"""
nodes = _texts_to_nodes(texts, metadatas, ids)
return list(self.add_nodes(nodes, **kwargs))
async def aadd_texts(
self,
texts: Iterable[str],
metadatas: Optional[Iterable[dict]] = None,
*,
ids: Optional[Iterable[str]] = None,
**kwargs: Any,
) -> list[str]:
"""Run more texts through the embeddings and add to the vector store.
The Links present in the metadata field `links` will be extracted to create
the `Node` links.
Eg if nodes `a` and `b` are connected over a hyperlink `https://some-url`, the
function call would look like:
.. code-block:: python
await store.aadd_texts(
ids=["a", "b"],
texts=["some text a", "some text b"],
metadatas=[
{
"links": [
Link.incoming(kind="hyperlink", tag="https://some-url")
]
},
{
"links": [
Link.outgoing(kind="hyperlink", tag="https://some-url")
]
},
],
)
Args:
texts: Iterable of strings to add to the vector store.
metadatas: Optional list of metadatas associated with the texts.
The metadata key `links` shall be an iterable of
:py:class:`~langchain_community.graph_vectorstores.links.Link`.
ids: Optional list of IDs associated with the texts.
**kwargs: vector store specific parameters.
Returns:
List of ids from adding the texts into the vector store.
"""
nodes = _texts_to_nodes(texts, metadatas, ids)
return [_id async for _id in self.aadd_nodes(nodes, **kwargs)]
def add_documents(
self,
documents: Iterable[Document],
**kwargs: Any,
) -> list[str]:
"""Run more documents through the embeddings and add to the vector store.
The Links present in the document metadata field `links` will be extracted to
create the `Node` links.
Eg if nodes `a` and `b` are connected over a hyperlink `https://some-url`, the
function call would look like:
.. code-block:: python
store.add_documents(
[
Document(
id="a",
page_content="some text a",
metadata={
"links": [
Link.incoming(kind="hyperlink", tag="http://some-url")
]
}
),
Document(
id="b",
page_content="some text b",
metadata={
"links": [
Link.outgoing(kind="hyperlink", tag="http://some-url")
]
}
),
]
)
Args:
documents: Documents to add to the vector store.
The document's metadata key `links` shall be an iterable of
:py:class:`~langchain_community.graph_vectorstores.links.Link`.
Returns:
List of IDs of the added texts.
"""
nodes = _documents_to_nodes(documents)
return list(self.add_nodes(nodes, **kwargs))
async def aadd_documents(
self,
documents: Iterable[Document],
**kwargs: Any,
) -> list[str]:
"""Run more documents through the embeddings and add to the vector store.
The Links present in the document metadata field `links` will be extracted to
create the `Node` links.
Eg if nodes `a` and `b` are connected over a hyperlink `https://some-url`, the
function call would look like:
.. code-block:: python
store.add_documents(
[
Document(
id="a",
page_content="some text a",
metadata={
"links": [
Link.incoming(kind="hyperlink", tag="http://some-url")
]
}
),
Document(
id="b",
page_content="some text b",
metadata={
"links": [
Link.outgoing(kind="hyperlink", tag="http://some-url")
]
}
),
]
)
Args:
documents: Documents to add to the vector store.
The document's metadata key `links` shall be an iterable of
:py:class:`~langchain_community.graph_vectorstores.links.Link`.
Returns:
List of IDs of the added texts.
"""
nodes = _documents_to_nodes(documents)
return [_id async for _id in self.aadd_nodes(nodes, **kwargs)]
@abstractmethod
def traversal_search(
self,
query: str,
*,
k: int = 4,
depth: int = 1,
filter: dict[str, Any] | None = None, # noqa: A002
**kwargs: Any,
) -> Iterable[Document]:
"""Retrieve documents from traversing this graph store.
First, `k` nodes are retrieved using a search for each `query` string.
Then, additional nodes are discovered up to the given `depth` from those
starting nodes.
Args:
query: The query string.
k: The number of Documents to return from the initial search.
Defaults to 4. Applies to each of the query strings.
depth: The maximum depth of edges to traverse. Defaults to 1.
filter: Optional metadata to filter the results.
**kwargs: Additional keyword arguments.
Returns:
Collection of retrieved documents.
"""
async def atraversal_search(
self,
query: str,
*,
k: int = 4,
depth: int = 1,
filter: dict[str, Any] | None = None, # noqa: A002
**kwargs: Any,
) -> AsyncIterable[Document]:
"""Retrieve documents from traversing this graph store.
First, `k` nodes are retrieved using a search for each `query` string.
Then, additional nodes are discovered up to the given `depth` from those
starting nodes.
Args:
query: The query string.
k: The number of Documents to return from the initial search.
Defaults to 4. Applies to each of the query strings.
depth: The maximum depth of edges to traverse. Defaults to 1.
filter: Optional metadata to filter the results.
**kwargs: Additional keyword arguments.
Returns:
Collection of retrieved documents.
"""
iterator = iter(
await run_in_executor(
None,
self.traversal_search,
query,
k=k,
depth=depth,
filter=filter,
**kwargs,
)
)
done = object()
while True:
doc = await run_in_executor(None, next, iterator, done)
if doc is done:
break
yield doc # type: ignore[misc]
@abstractmethod
def mmr_traversal_search(
self,
query: str,
*,
initial_roots: Sequence[str] = (),
k: int = 4,
depth: int = 2,
fetch_k: int = 100,
adjacent_k: int = 10,
lambda_mult: float = 0.5,
score_threshold: float = float("-inf"),
filter: dict[str, Any] | None = None, # noqa: A002
**kwargs: Any,
) -> Iterable[Document]:
"""Retrieve documents from this graph store using MMR-traversal.
This strategy first retrieves the top `fetch_k` results by similarity to
the question. It then selects the top `k` results based on
maximum-marginal relevance using the given `lambda_mult`.
At each step, it considers the (remaining) documents from `fetch_k` as
well as any documents connected by edges to a selected document
retrieved based on similarity (a "root").
Args:
query: The query string to search for.
initial_roots: Optional list of document IDs to use for initializing search.
The top `adjacent_k` nodes adjacent to each initial root will be
included in the set of initial candidates. To fetch only in the
neighborhood of these nodes, set `fetch_k = 0`.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch via similarity.
Defaults to 100.
adjacent_k: Number of adjacent Documents to fetch.
Defaults to 10.
depth: Maximum depth of a node (number of edges) from a node
retrieved via similarity. Defaults to 2.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding to maximum
diversity and 1 to minimum diversity. Defaults to 0.5.
score_threshold: Only documents with a score greater than or equal
this threshold will be chosen. Defaults to negative infinity.
filter: Optional metadata to filter the results.
**kwargs: Additional keyword arguments.
"""
async def ammr_traversal_search(
self,
query: str,
*,
initial_roots: Sequence[str] = (),
k: int = 4,
depth: int = 2,
fetch_k: int = 100,
adjacent_k: int = 10,
lambda_mult: float = 0.5,
score_threshold: float = float("-inf"),
filter: dict[str, Any] | None = None, # noqa: A002
**kwargs: Any,
) -> AsyncIterable[Document]:
"""Retrieve documents from this graph store using MMR-traversal.
This strategy first retrieves the top `fetch_k` results by similarity to
the question. It then selects the top `k` results based on
maximum-marginal relevance using the given `lambda_mult`.
At each step, it considers the (remaining) documents from `fetch_k` as
well as any documents connected by edges to a selected document
retrieved based on similarity (a "root").
Args:
query: The query string to search for.
initial_roots: Optional list of document IDs to use for initializing search.
The top `adjacent_k` nodes adjacent to each initial root will be
included in the set of initial candidates. To fetch only in the
neighborhood of these nodes, set `fetch_k = 0`.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch via similarity.
Defaults to 100.
adjacent_k: Number of adjacent Documents to fetch.
Defaults to 10.
depth: Maximum depth of a node (number of edges) from a node
retrieved via similarity. Defaults to 2.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding to maximum
diversity and 1 to minimum diversity. Defaults to 0.5.
score_threshold: Only documents with a score greater than or equal
this threshold will be chosen. Defaults to negative infinity.
filter: Optional metadata to filter the results.
**kwargs: Additional keyword arguments.
"""
iterator = iter(
await run_in_executor(
None,
self.mmr_traversal_search,
query,
initial_roots=initial_roots,
k=k,
fetch_k=fetch_k,
adjacent_k=adjacent_k,
depth=depth,
lambda_mult=lambda_mult,
score_threshold=score_threshold,
filter=filter,
**kwargs,
)
)
done = object()
while True:
doc = await run_in_executor(None, next, iterator, done)
if doc is done:
break
yield doc # type: ignore[misc]
def similarity_search(
self, query: str, k: int = 4, **kwargs: Any
) -> list[Document]:
return list(self.traversal_search(query, k=k, depth=0))
def max_marginal_relevance_search(
self,
query: str,
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
**kwargs: Any,
) -> list[Document]:
if kwargs.get("depth", 0) > 0:
logger.warning(
"'mmr' search started with depth > 0. "
"Maybe you meant to do a 'mmr_traversal' search?"
)
return list(
self.mmr_traversal_search(
query, k=k, fetch_k=fetch_k, lambda_mult=lambda_mult, depth=0
)
)
async def asimilarity_search(
self, query: str, k: int = 4, **kwargs: Any
) -> list[Document]:
return [doc async for doc in self.atraversal_search(query, k=k, depth=0)]
def search(self, query: str, search_type: str, **kwargs: Any) -> list[Document]:
if search_type == "similarity":
return self.similarity_search(query, **kwargs)
elif search_type == "similarity_score_threshold":
docs_and_similarities = self.similarity_search_with_relevance_scores(
query, **kwargs
)
return [doc for doc, _ in docs_and_similarities]
elif search_type == "mmr":
return self.max_marginal_relevance_search(query, **kwargs)
elif search_type == "traversal":
return list(self.traversal_search(query, **kwargs))
elif search_type == "mmr_traversal":
return list(self.mmr_traversal_search(query, **kwargs))
else:
raise ValueError(
f"search_type of {search_type} not allowed. Expected "
"search_type to be 'similarity', 'similarity_score_threshold', "
"'mmr', 'traversal', or 'mmr_traversal'."
)
async def asearch(
self, query: str, search_type: str, **kwargs: Any
) -> list[Document]:
if search_type == "similarity":
return await self.asimilarity_search(query, **kwargs)
elif search_type == "similarity_score_threshold":
docs_and_similarities = await self.asimilarity_search_with_relevance_scores(
query, **kwargs
)
return [doc for doc, _ in docs_and_similarities]
elif search_type == "mmr":
return await self.amax_marginal_relevance_search(query, **kwargs)
elif search_type == "traversal":
return [doc async for doc in self.atraversal_search(query, **kwargs)]
elif search_type == "mmr_traversal":
return [doc async for doc in self.ammr_traversal_search(query, **kwargs)]
else:
raise ValueError(
f"search_type of {search_type} not allowed. Expected "
"search_type to be 'similarity', 'similarity_score_threshold', "
"'mmr', 'traversal', or 'mmr_traversal'."
)
def as_retriever(self, **kwargs: Any) -> GraphVectorStoreRetriever:
    """Return GraphVectorStoreRetriever initialized from this GraphVectorStore.

    Args:
        **kwargs: Keyword arguments to pass to the search function.
            Can include:

            - search_type (Optional[str]): Defines the type of search that
              the Retriever should perform.
              Can be ``traversal`` (default), ``similarity``, ``mmr``,
              ``mmr_traversal``, or ``similarity_score_threshold``.
            - search_kwargs (Optional[Dict]): Keyword arguments to pass to the
              search function. Can include things like:

              - k(int): Amount of documents to return (Default: 4).
              - depth(int): The maximum depth of edges to traverse (Default: 1).
                Only applies to search_type: ``traversal`` and ``mmr_traversal``.
              - score_threshold(float): Minimum relevance threshold
                for similarity_score_threshold.
              - fetch_k(int): Amount of documents to pass to MMR algorithm
                (Default: 20).
              - lambda_mult(float): Diversity of results returned by MMR;
                1 for minimum diversity and 0 for maximum. (Default: 0.5).

    Returns:
        Retriever for this GraphVectorStore.

    Examples:

    .. code-block:: python

        # Retrieve documents traversing edges
        docsearch.as_retriever(
            search_type="traversal",
            search_kwargs={'k': 6, 'depth': 2}
        )

        # Retrieve documents with higher diversity
        # Useful if your dataset has many similar documents
        docsearch.as_retriever(
            search_type="mmr_traversal",
            search_kwargs={'k': 6, 'lambda_mult': 0.25, 'depth': 2}
        )

        # Fetch more documents for the MMR algorithm to consider
        # But only return the top 5
        docsearch.as_retriever(
            search_type="mmr_traversal",
            search_kwargs={'k': 5, 'fetch_k': 50, 'depth': 2}
        )

        # Only retrieve documents that have a relevance score
        # Above a certain threshold
        docsearch.as_retriever(
            search_type="similarity_score_threshold",
            search_kwargs={'score_threshold': 0.8}
        )

        # Only get the single most similar document from the dataset
        docsearch.as_retriever(search_kwargs={'k': 1})
    """
    # All retriever configuration is carried by kwargs; this store becomes
    # the retriever's backing `vectorstore`.
    return GraphVectorStoreRetriever(vectorstore=self, **kwargs)
@beta(message="Added in version 0.3.1 of langchain_community. API subject to change.")
class GraphVectorStoreRetriever(VectorStoreRetriever):
    """Retriever for GraphVectorStore.

    A graph vector store retriever is a retriever that uses a graph vector store to
    retrieve documents.
    It is similar to a vector store retriever, except that it uses both vector
    similarity and graph connections to retrieve documents.
    It uses the search methods implemented by a graph vector store, like traversal
    search and MMR traversal search, to query the texts in the graph vector store.

    Example::

        store = CassandraGraphVectorStore(...)
        retriever = store.as_retriever()
        retriever.invoke("What is ...")

    .. seealso::

        :mod:`How to use a graph vector store <langchain_community.graph_vectorstores>`

    How to use a graph vector store as a retriever
    ==============================================

    Creating a retriever from a graph vector store
    ----------------------------------------------

    You can build a retriever from a graph vector store using its
    :meth:`~langchain_community.graph_vectorstores.base.GraphVectorStore.as_retriever`
    method.

    First we instantiate a graph vector store.
    We will use a store backed by Cassandra
    :class:`~langchain_community.graph_vectorstores.cassandra.CassandraGraphVectorStore`
    graph vector store::

        from langchain_community.document_loaders import TextLoader
        from langchain_community.graph_vectorstores import CassandraGraphVectorStore
        from langchain_community.graph_vectorstores.extractors import (
            KeybertLinkExtractor,
            LinkExtractorTransformer,
        )
        from langchain_openai import OpenAIEmbeddings
        from langchain_text_splitters import CharacterTextSplitter

        loader = TextLoader("state_of_the_union.txt")
        documents = loader.load()
        text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
        texts = text_splitter.split_documents(documents)
        pipeline = LinkExtractorTransformer([KeybertLinkExtractor()])
        pipeline.transform_documents(texts)
        embeddings = OpenAIEmbeddings()
        graph_vectorstore = CassandraGraphVectorStore.from_documents(texts, embeddings)

    We can then instantiate a retriever::

        retriever = graph_vectorstore.as_retriever()

    This creates a retriever (specifically a ``GraphVectorStoreRetriever``), which we
    can use in the usual way::

        docs = retriever.invoke("what did the president say about ketanji brown jackson?")

    Maximum marginal relevance traversal retrieval
    ----------------------------------------------

    By default, the graph vector store retriever uses similarity search, then expands
    the retrieved set by following a fixed number of graph edges.
    If the underlying graph vector store supports maximum marginal relevance traversal,
    you can specify that as the search type.

    MMR-traversal is a retrieval method combining MMR and graph traversal.
    The strategy first retrieves the top fetch_k results by similarity to the question.
    It then iteratively expands the set of fetched documents by following adjacent_k
    graph edges and selects the top k results based on maximum-marginal relevance using
    the given ``lambda_mult``::

        retriever = graph_vectorstore.as_retriever(search_type="mmr_traversal")

    Passing search parameters
    -------------------------

    We can pass parameters to the underlying graph vector store's search methods using
    ``search_kwargs``.

    Specifying graph traversal depth
    ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

    For example, we can set the graph traversal depth to only return documents
    reachable through a given number of graph edges::

        retriever = graph_vectorstore.as_retriever(search_kwargs={"depth": 3})

    Specifying MMR parameters
    ^^^^^^^^^^^^^^^^^^^^^^^^^

    When using search type ``mmr_traversal``, several parameters of the MMR algorithm
    can be configured.

    The ``fetch_k`` parameter determines how many documents are fetched using vector
    similarity and ``adjacent_k`` parameter determines how many documents are fetched
    using graph edges.

    The ``lambda_mult`` parameter controls how the MMR re-ranking weights similarity to
    the query string vs diversity among the retrieved documents as fetched documents
    are selected for the set of ``k`` final results::

        retriever = graph_vectorstore.as_retriever(
            search_type="mmr",
            search_kwargs={"fetch_k": 20, "adjacent_k": 20, "lambda_mult": 0.25},
        )

    Specifying top k
    ^^^^^^^^^^^^^^^^

    We can also limit the number of documents ``k`` returned by the retriever.

    Note that if ``depth`` is greater than zero, the retriever may return more documents
    than is specified by ``k``, since both the original ``k`` documents retrieved using
    vector similarity and any documents connected via graph edges will be returned::

        retriever = graph_vectorstore.as_retriever(search_kwargs={"k": 1})

    Similarity score threshold retrieval
    ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

    For example, we can set a similarity score threshold and only return documents with
    a score above that threshold::

        retriever = graph_vectorstore.as_retriever(search_kwargs={"score_threshold": 0.5})
    """  # noqa: E501

    vectorstore: VectorStore
    """VectorStore to use for retrieval."""
    search_type: str = "traversal"
    """Type of search to perform. Defaults to "traversal"."""
    allowed_search_types: ClassVar[Collection[str]] = (
        "similarity",
        "similarity_score_threshold",
        "mmr",
        "traversal",
        "mmr_traversal",
    )

    @property
    def graph_vectorstore(self) -> GraphVectorStore:
        # The backing store is expected to be a GraphVectorStore (this class is
        # constructed via GraphVectorStore.as_retriever); the cast narrows the
        # inherited `vectorstore: VectorStore` attribute for type checkers.
        return cast(GraphVectorStore, self.vectorstore)

    def _get_relevant_documents(
        self, query: str, *, run_manager: CallbackManagerForRetrieverRun
    ) -> list[Document]:
        # Graph-aware search types are handled here; every other search type
        # ("similarity", "similarity_score_threshold", "mmr") is delegated to
        # the base VectorStoreRetriever implementation.
        if self.search_type == "traversal":
            return list(
                self.graph_vectorstore.traversal_search(query, **self.search_kwargs)
            )
        elif self.search_type == "mmr_traversal":
            return list(
                self.graph_vectorstore.mmr_traversal_search(query, **self.search_kwargs)
            )
        else:
            return super()._get_relevant_documents(query, run_manager=run_manager)

    async def _aget_relevant_documents(
        self, query: str, *, run_manager: AsyncCallbackManagerForRetrieverRun
    ) -> list[Document]:
        # Async mirror of _get_relevant_documents: drain the async iterators
        # for graph-aware search types, otherwise defer to the base class.
        if self.search_type == "traversal":
            return [
                doc
                async for doc in self.graph_vectorstore.atraversal_search(
                    query, **self.search_kwargs
                )
            ]
        elif self.search_type == "mmr_traversal":
            return [
                doc
                async for doc in self.graph_vectorstore.ammr_traversal_search(
                    query, **self.search_kwargs
                )
            ]
        else:
            return await super()._aget_relevant_documents(
                query, run_manager=run_manager
            )
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/graph_vectorstores/mmr_helper.py | """Tools for the Graph Traversal Maximal Marginal Relevance (MMR) reranking."""
from __future__ import annotations
import dataclasses
from typing import TYPE_CHECKING, Iterable
import numpy as np
from langchain_community.utils.math import cosine_similarity
if TYPE_CHECKING:
from numpy.typing import NDArray
def _emb_to_ndarray(embedding: list[float]) -> NDArray[np.float32]:
emb_array = np.array(embedding, dtype=np.float32)
if emb_array.ndim == 1:
emb_array = np.expand_dims(emb_array, axis=0)
return emb_array
# Sentinel meaning "no score yet" / "no threshold": used as the initial
# best_score and the default score_threshold in MmrHelper.
NEG_INF = float("-inf")
@dataclasses.dataclass
class _Candidate:
id: str
similarity: float
weighted_similarity: float
weighted_redundancy: float
score: float = dataclasses.field(init=False)
def __post_init__(self) -> None:
self.score = self.weighted_similarity - self.weighted_redundancy
def update_redundancy(self, new_weighted_redundancy: float) -> None:
if new_weighted_redundancy > self.weighted_redundancy:
self.weighted_redundancy = new_weighted_redundancy
self.score = self.weighted_similarity - self.weighted_redundancy
class MmrHelper:
    """Helper for executing an MMR traversal query.

    Args:
        query_embedding: The embedding of the query to use for scoring.
        lambda_mult: Number between 0 and 1 that determines the degree
            of diversity among the results with 0 corresponding to maximum
            diversity and 1 to minimum diversity. Defaults to 0.5.
        score_threshold: Only documents with a score greater than or equal
            this threshold will be chosen. Defaults to -infinity.
    """

    dimensions: int
    """Dimensions of the embedding."""
    query_embedding: NDArray[np.float32]
    """Embedding of the query as a (1,dim) ndarray."""
    lambda_mult: float
    """Number between 0 and 1.

    Determines the degree of diversity among the results with 0 corresponding to
    maximum diversity and 1 to minimum diversity."""
    lambda_mult_complement: float
    """1 - lambda_mult."""
    score_threshold: float
    """Only documents with a score greater than or equal to this will be chosen."""

    selected_ids: list[str]
    """List of selected IDs (in selection order)."""
    selected_mmr_scores: list[float]
    """List of MMR score at the time each document is selected."""
    selected_similarity_scores: list[float]
    """List of similarity score for each selected document."""
    selected_embeddings: NDArray[np.float32]
    """(N, dim) ndarray with a row for each selected node."""

    candidate_id_to_index: dict[str, int]
    """Dictionary of candidate IDs to indices in candidates and candidate_embeddings."""
    candidates: list[_Candidate]
    """List containing information about candidates.

    Same order as rows in `candidate_embeddings`.
    """
    candidate_embeddings: NDArray[np.float32]
    """(N, dim) ndarray with a row for each candidate."""

    best_score: float
    best_id: str | None

    def __init__(
        self,
        k: int,
        query_embedding: list[float],
        lambda_mult: float = 0.5,
        score_threshold: float = NEG_INF,
    ) -> None:
        """Create a new Traversal MMR helper."""
        self.query_embedding = _emb_to_ndarray(query_embedding)
        self.dimensions = self.query_embedding.shape[1]
        self.lambda_mult = lambda_mult
        self.lambda_mult_complement = 1 - lambda_mult
        self.score_threshold = score_threshold

        self.selected_ids = []
        self.selected_similarity_scores = []
        self.selected_mmr_scores = []

        # List of selected embeddings (in selection order).
        # Pre-allocated to `k` rows; only the first len(selected_ids) rows are
        # valid at any time (see _already_selected_embeddings).
        self.selected_embeddings = np.ndarray((k, self.dimensions), dtype=np.float32)

        self.candidate_id_to_index = {}

        # List of the candidates.
        self.candidates = []
        # numpy n-dimensional array of the candidate embeddings.
        self.candidate_embeddings = np.ndarray((0, self.dimensions), dtype=np.float32)

        self.best_score = NEG_INF
        self.best_id = None

    def candidate_ids(self) -> Iterable[str]:
        """Return the IDs of the candidates."""
        return self.candidate_id_to_index.keys()

    def _already_selected_embeddings(self) -> NDArray[np.float32]:
        """Return the selected embeddings sliced to the already assigned values."""
        selected = len(self.selected_ids)
        # vsplit at `selected` yields the valid prefix of the pre-allocated
        # (k, dim) buffer as a view.
        return np.vsplit(self.selected_embeddings, [selected])[0]

    def _pop_candidate(self, candidate_id: str) -> tuple[float, NDArray[np.float32]]:
        """Pop the candidate with the given ID.

        Uses swap-with-last removal so `candidates`, `candidate_embeddings`
        and `candidate_id_to_index` stay dense and mutually consistent in O(1)
        (plus the embedding-row copy).

        Returns:
            The similarity score and embedding of the candidate.

        Raises:
            ValueError: If the internal index bookkeeping is inconsistent.
        """
        # Get the embedding for the id.
        index = self.candidate_id_to_index.pop(candidate_id)
        if self.candidates[index].id != candidate_id:
            msg = (
                "ID in self.candidate_id_to_index doesn't match the ID of the "
                "corresponding index in self.candidates"
            )
            raise ValueError(msg)
        embedding: NDArray[np.float32] = self.candidate_embeddings[index].copy()

        # Swap that index with the last index in the candidates and
        # candidate_embeddings.
        last_index = self.candidate_embeddings.shape[0] - 1

        similarity = 0.0
        if index == last_index:
            # Already the last item. We don't need to swap.
            similarity = self.candidates.pop().similarity
        else:
            # Move the last candidate into the vacated slot and re-point its
            # index entry.
            self.candidate_embeddings[index] = self.candidate_embeddings[last_index]

            similarity = self.candidates[index].similarity

            old_last = self.candidates.pop()
            self.candidates[index] = old_last
            self.candidate_id_to_index[old_last.id] = index

        # Truncate the embedding matrix by one row (vsplit prefix).
        self.candidate_embeddings = np.vsplit(self.candidate_embeddings, [last_index])[
            0
        ]

        return similarity, embedding

    def pop_best(self) -> str | None:
        """Select and pop the best item being considered.

        Updates the consideration set based on it.

        Returns:
            The ID of the best item, or None if no candidate meets the
            score threshold.
        """
        if self.best_id is None or self.best_score < self.score_threshold:
            return None

        # Get the selection and remove from candidates.
        selected_id = self.best_id
        selected_similarity, selected_embedding = self._pop_candidate(selected_id)

        # Add the ID and embedding to the selected information.
        selection_index = len(self.selected_ids)
        self.selected_ids.append(selected_id)
        self.selected_mmr_scores.append(self.best_score)
        self.selected_similarity_scores.append(selected_similarity)
        self.selected_embeddings[selection_index] = selected_embedding

        # Reset the best score / best ID.
        self.best_score = NEG_INF
        self.best_id = None

        # Update the candidates redundancy, tracking the best node.
        if self.candidate_embeddings.shape[0] > 0:
            similarity = cosine_similarity(
                self.candidate_embeddings, np.expand_dims(selected_embedding, axis=0)
            )
            for index, candidate in enumerate(self.candidates):
                candidate.update_redundancy(similarity[index][0])
                if candidate.score > self.best_score:
                    self.best_score = candidate.score
                    self.best_id = candidate.id

        return selected_id

    def add_candidates(self, candidates: dict[str, list[float]]) -> None:
        """Add candidates to the consideration set."""
        # Determine the keys to actually include.
        # These are the candidates that aren't already selected
        # or under consideration.
        include_ids_set = set(candidates.keys())
        include_ids_set.difference_update(self.selected_ids)
        include_ids_set.difference_update(self.candidate_id_to_index.keys())
        include_ids = list(include_ids_set)

        # Now, build up a matrix of the remaining candidate embeddings.
        # And add them to the
        new_embeddings: NDArray[np.float32] = np.ndarray(
            (
                len(include_ids),
                self.dimensions,
            )
        )
        offset = self.candidate_embeddings.shape[0]
        for index, candidate_id in enumerate(include_ids):
            # NOTE(review): candidate_id is drawn from include_ids, so this
            # membership check is always true; it looks like a leftover.
            if candidate_id in include_ids:
                self.candidate_id_to_index[candidate_id] = offset + index
                embedding = candidates[candidate_id]
                new_embeddings[index] = embedding

        # Compute the similarity to the query.
        similarity = cosine_similarity(new_embeddings, self.query_embedding)

        # Compute the distance metrics of all of pairs in the selected set with
        # the new candidates.
        redundancy = cosine_similarity(
            new_embeddings, self._already_selected_embeddings()
        )
        for index, candidate_id in enumerate(include_ids):
            max_redundancy = 0.0
            if redundancy.shape[0] > 0:
                max_redundancy = redundancy[index].max()
            candidate = _Candidate(
                id=candidate_id,
                similarity=similarity[index][0],
                weighted_similarity=self.lambda_mult * similarity[index][0],
                weighted_redundancy=self.lambda_mult_complement * max_redundancy,
            )
            self.candidates.append(candidate)

            if candidate.score >= self.best_score:
                self.best_score = candidate.score
                self.best_id = candidate.id

        # Add the new embeddings to the candidate set.
        self.candidate_embeddings = np.vstack(
            (
                self.candidate_embeddings,
                new_embeddings,
            )
        )
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/graph_vectorstores/networkx.py | """Utilities for using Graph Vector Stores with networkx."""
import typing
from langchain_core.documents import Document
from langchain_community.graph_vectorstores.links import get_links
if typing.TYPE_CHECKING:
import networkx as nx
def documents_to_networkx(
    documents: typing.Iterable[Document],
    *,
    tag_nodes: bool = True,
) -> "nx.DiGraph":
    """Return the networkx directed graph corresponding to the documents.

    Args:
        documents: The documents to convert to networkx. May be any iterable,
            including a single-use generator.
        tag_nodes: If `True`, each tag will be rendered as a node, with edges
            to/from the corresponding documents. If `False`, edges will be
            between documents, with a label corresponding to the tag(s)
            connecting them.

    Raises:
        ValueError: If any document has no ID.
    """
    import networkx as nx

    # Materialize up-front: the construction below makes two passes over
    # `documents`, which would silently see an empty second pass (producing a
    # graph with no document nodes) if the caller passed a generator.
    documents = list(documents)

    graph = nx.DiGraph()

    tag_ids: typing.Dict[typing.Tuple[str, str], str] = {}
    tag_labels: typing.Dict[str, str] = {}
    documents_by_incoming: typing.Dict[str, typing.Set[str]] = {}

    # First pass:
    # - Register tag IDs for each unique (kind, tag).
    # - If rendering tag nodes, add them to the graph.
    # - If not rendering tag nodes, create a dictionary of documents by incoming tags.
    for document in documents:
        if document.id is None:
            raise ValueError(f"Illegal graph document without ID: {document}")
        for link in get_links(document):
            tag_key = (link.kind, link.tag)
            tag_id = tag_ids.get(tag_key)
            if tag_id is None:
                tag_id = f"tag_{len(tag_ids)}"
                tag_ids[tag_key] = tag_id

                if tag_nodes:
                    graph.add_node(tag_id, label=f"{link.kind}:{link.tag}")

            if not tag_nodes and (link.direction == "in" or link.direction == "bidir"):
                tag_labels[tag_id] = f"{link.kind}:{link.tag}"
                documents_by_incoming.setdefault(tag_id, set()).add(document.id)

    # Second pass:
    # - Render document nodes
    # - If rendering tag nodes, render edges to/from documents and tag nodes.
    # - If not rendering tag nodes, render edges to/from documents based on tags.
    for document in documents:
        graph.add_node(document.id, text=document.page_content)

        targets: typing.Dict[str, typing.List[str]] = {}
        for link in get_links(document):
            tag_id = tag_ids[(link.kind, link.tag)]
            if tag_nodes:
                if link.direction == "in" or link.direction == "bidir":
                    graph.add_edge(tag_id, document.id)
                if link.direction == "out" or link.direction == "bidir":
                    graph.add_edge(document.id, tag_id)
            else:
                if link.direction == "out" or link.direction == "bidir":
                    # NOTE(review): assumes every outgoing tag also appears as
                    # an incoming link somewhere; otherwise these lookups raise
                    # KeyError — confirm this is the intended contract.
                    label = tag_labels[tag_id]
                    for target in documents_by_incoming[tag_id]:
                        if target != document.id:
                            targets.setdefault(target, []).append(label)

        # Avoid a multigraph by collecting the list of labels for each edge.
        if not tag_nodes:
            for target, labels in targets.items():
                graph.add_edge(document.id, target, label=str(labels))

    return graph
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/graph_vectorstores/links.py | from collections.abc import Iterable
from dataclasses import dataclass
from typing import Literal, Union
from langchain_core._api import beta
from langchain_core.documents import Document
@beta()
@dataclass(frozen=True)
class Link:
    """A link to/from a tag of a given kind.

    Documents in a :class:`graph vector store <langchain_community.graph_vectorstores.base.GraphVectorStore>`
    are connected via "links".
    Links form a bipartite graph between documents and tags: documents are connected
    to tags, and tags are connected to other documents.
    When documents are retrieved from a graph vector store, a pair of documents are
    connected with a depth of one if both documents are connected to the same tag.

    Links have a ``kind`` property, used to namespace different tag identifiers.
    For example a link to a keyword might use kind ``kw``, while a link to a URL might
    use kind ``url``.
    This allows the same tag value to be used in different contexts without causing
    name collisions.

    Links are directed. The directionality of links controls how the graph is
    traversed at retrieval time.
    For example, given documents ``A`` and ``B``, connected by links to tag ``T``:

    +----------+----------+---------------------------------+
    | A to T   | B to T   | Result                          |
    +==========+==========+=================================+
    | outgoing | incoming | Retrieval traverses from A to B |
    +----------+----------+---------------------------------+
    | incoming | incoming | No traversal from A to B        |
    +----------+----------+---------------------------------+
    | outgoing | outgoing | No traversal from A to B        |
    +----------+----------+---------------------------------+
    | bidir    | incoming | Retrieval traverses from A to B |
    +----------+----------+---------------------------------+
    | bidir    | outgoing | No traversal from A to B        |
    +----------+----------+---------------------------------+
    | outgoing | bidir    | Retrieval traverses from A to B |
    +----------+----------+---------------------------------+
    | incoming | bidir    | No traversal from A to B        |
    +----------+----------+---------------------------------+

    Directed links make it possible to describe relationships such as term
    references / definitions: term definitions are generally relevant to any documents
    that use the term, but the full set of documents using a term generally aren't
    relevant to the term's definition.

    .. seealso::

        - :mod:`How to use a graph vector store <langchain_community.graph_vectorstores>`
        - :class:`How to link Documents on hyperlinks in HTML <langchain_community.graph_vectorstores.extractors.html_link_extractor.HtmlLinkExtractor>`
        - :class:`How to link Documents on common keywords (using KeyBERT) <langchain_community.graph_vectorstores.extractors.keybert_link_extractor.KeybertLinkExtractor>`
        - :class:`How to link Documents on common named entities (using GliNER) <langchain_community.graph_vectorstores.extractors.gliner_link_extractor.GLiNERLinkExtractor>`

    How to add links to a Document
    ==============================

    How to create links
    -------------------

    You can create links using the Link class's constructors :meth:`incoming`,
    :meth:`outgoing`, and :meth:`bidir`::

        from langchain_community.graph_vectorstores.links import Link

        print(Link.bidir(kind="location", tag="Paris"))

    .. code-block:: output

        Link(kind='location', direction='bidir', tag='Paris')

    Extending documents with links
    ------------------------------

    Now that we know how to create links, let's associate them with some documents.
    These edges will strengthen the connection between documents that share a keyword
    when using a graph vector store to retrieve documents.

    First, we'll load some text and chunk it into smaller pieces.
    Then we'll add a link to each document to link them all together::

        from langchain_community.document_loaders import TextLoader
        from langchain_community.graph_vectorstores.links import add_links
        from langchain_text_splitters import CharacterTextSplitter

        loader = TextLoader("state_of_the_union.txt")

        raw_documents = loader.load()
        text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
        documents = text_splitter.split_documents(raw_documents)

        for doc in documents:
            add_links(doc, Link.bidir(kind="genre", tag="oratory"))

        print(documents[0].metadata)

    .. code-block:: output

        {'source': 'state_of_the_union.txt', 'links': [Link(kind='genre', direction='bidir', tag='oratory')]}

    As we can see, each document's metadata now includes a bidirectional link to the
    genre ``oratory``.

    The documents can then be added to a graph vector store::

        from langchain_community.graph_vectorstores import CassandraGraphVectorStore

        graph_vectorstore = CassandraGraphVectorStore.from_documents(
            documents=documents, embeddings=...
        )
    """  # noqa: E501

    kind: str
    """The kind of link. Allows different extractors to use the same tag name without
    creating collisions between extractors. For example “keyword” vs “url”."""
    direction: Literal["in", "out", "bidir"]
    """The direction of the link."""
    tag: str
    """The tag of the link."""

    @staticmethod
    def incoming(kind: str, tag: str) -> "Link":
        """Create an incoming link.

        Args:
            kind: the link kind.
            tag: the link tag.
        """
        return Link(kind=kind, direction="in", tag=tag)

    @staticmethod
    def outgoing(kind: str, tag: str) -> "Link":
        """Create an outgoing link.

        Args:
            kind: the link kind.
            tag: the link tag.
        """
        return Link(kind=kind, direction="out", tag=tag)

    @staticmethod
    def bidir(kind: str, tag: str) -> "Link":
        """Create a bidirectional link.

        Args:
            kind: the link kind.
            tag: the link tag.
        """
        return Link(kind=kind, direction="bidir", tag=tag)
# Metadata key under which a document's list of Link objects is stored
# (read/written by get_links, add_links, and copy_with_links below).
METADATA_LINKS_KEY = "links"
@beta()
def get_links(doc: Document) -> list[Link]:
    """Return the (mutable) list of links stored on a document.

    Args:
        doc: The document to get the link tags from.

    Returns:
        The document's link list, created empty in the metadata if absent.
    """
    stored = doc.metadata.setdefault(METADATA_LINKS_KEY, [])
    if isinstance(stored, list):
        return stored
    # Normalize any other iterable into a list and write it back, so later
    # calls (and add_links) can mutate the stored value in place.
    normalized = list(stored)
    doc.metadata[METADATA_LINKS_KEY] = normalized
    return normalized
@beta()
def add_links(doc: Document, *links: Union[Link, Iterable[Link]]) -> None:
    """Append links to the given document's metadata.

    Args:
        doc: The document to add the links to.
        *links: The links to add to the document; each argument may be a
            single Link or an iterable of Links.
    """
    target = get_links(doc)
    for entry in links:
        # A bare Link is appended as-is; any iterable argument is flattened.
        if isinstance(entry, Iterable):
            target.extend(entry)
        else:
            target.append(entry)
@beta()
def copy_with_links(doc: Document, *links: Union[Link, Iterable[Link]]) -> Document:
    """Return a document with the given links added.

    The original document is not modified (beyond the metadata normalization
    performed by get_links); the result carries a shallow copy of the metadata
    with the deduplicated union of existing and new links.

    Args:
        doc: The document to add the links to.
        *links: The links to add to the document.

    Returns:
        A document with a shallow-copy of the metadata with the links added.
    """
    merged = set(get_links(doc))
    for entry in links:
        # Flatten iterables of links; add single links directly. Using a set
        # deduplicates (Link is a frozen, hashable dataclass).
        if isinstance(entry, Iterable):
            merged.update(entry)
        else:
            merged.add(entry)
    return Document(
        page_content=doc.page_content,
        metadata={
            **doc.metadata,
            METADATA_LINKS_KEY: list(merged),
        },
    )
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.