Source code for langchain.vectorstores.chroma
https://api.python.langchain.com/en/latest/_modules/langchain/vectorstores/chroma.html
lambda_mult=lambda_mult,
)
candidates = _results_to_docs(results)
selected_results = [r for i, r in enumerate(candidates) if i in mmr_selected]
return selected_results
def max_marginal_relevance_search(
self,
query: str,
k: int = DEFAULT_K,
fetch_k: int = 20,
lambda_mult: float = 0.5,
filter: Optional[Dict[str, str]] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
Returns:
List of Documents selected by maximal marginal relevance.
"""
if self._embedding_function is None:
raise ValueError(
"For MMR search, you must specify an embedding function on" "creation."
)
embedding = self._embedding_function.embed_query(query)
docs = self.max_marginal_relevance_search_by_vector(
embedding, k, fetch_k, lambda_mult=lambda_mult, filter=filter
)
return docs
def delete_collection(self) -> None:
"""Delete the collection."""
self._client.delete_collection(self._collection.name)
def get(
self,
ids: Optional[OneOrMany[ID]] = None,
where: Optional[Where] = None,
limit: Optional[int] = None,
offset: Optional[int] = None,
where_document: Optional[WhereDocument] = None,
include: Optional[List[str]] = None,
) -> Dict[str, Any]:
"""Gets the collection.
Args:
ids: The ids of the embeddings to get. Optional.
where: A Where type dict used to filter results by.
E.g. `{"color" : "red", "price": 4.20}`. Optional.
limit: The number of documents to return. Optional.
offset: The offset to start returning results from.
Useful for paging results with limit. Optional.
where_document: A WhereDocument type dict used to filter by the documents.
E.g. `{$contains: {"text": "hello"}}`. Optional.
include: A list of what to include in the results.
Can contain `"embeddings"`, `"metadatas"`, `"documents"`.
Ids are always included.
Defaults to `["metadatas", "documents"]`. Optional.
"""
kwargs = {
"ids": ids,
"where": where,
"limit": limit,
"offset": offset,
"where_document": where_document,
}
if include is not None:
kwargs["include"] = include
return self._collection.get(**kwargs)
def persist(self) -> None:
"""Persist the collection.
This can be used to explicitly persist the data to disk.
It will also be called automatically when the object is destroyed.
"""
if self._persist_directory is None:
raise ValueError(
"You must specify a persist_directory on"
"creation to persist the collection."
)
self._client.persist()
def update_document(self, document_id: str, document: Document) -> None:
"""Update a document in the collection.
Args:
document_id (str): ID of the document to update.
document (Document): Document to update.
"""
text = document.page_content
metadata = document.metadata
if self._embedding_function is None:
raise ValueError(
"For update, you must specify an embedding function on creation."
)
embeddings = self._embedding_function.embed_documents([text])
self._collection.update(
ids=[document_id],
embeddings=embeddings,
documents=[text],
metadatas=[metadata],
)
@classmethod
def from_texts(
cls: Type[Chroma],
texts: List[str],
embedding: Optional[Embeddings] = None,
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME,
persist_directory: Optional[str] = None,
client_settings: Optional[chromadb.config.Settings] = None,
client: Optional[chromadb.Client] = None,
**kwargs: Any,
) -> Chroma:
"""Create a Chroma vectorstore from a raw documents.
If a persist_directory is specified, the collection will be persisted there.
Otherwise, the data will be ephemeral in-memory.
Args:
texts (List[str]): List of texts to add to the collection.
collection_name (str): Name of the collection to create.
persist_directory (Optional[str]): Directory to persist the collection.
embedding (Optional[Embeddings]): Embedding function. Defaults to None.
metadatas (Optional[List[dict]]): List of metadatas. Defaults to None.
ids (Optional[List[str]]): List of document IDs. Defaults to None.
client_settings (Optional[chromadb.config.Settings]): Chroma client settings
Returns:
Chroma: Chroma vectorstore.
"""
chroma_collection = cls(
collection_name=collection_name,
embedding_function=embedding,
persist_directory=persist_directory,
client_settings=client_settings,
client=client,
)
chroma_collection.add_texts(texts=texts, metadatas=metadatas, ids=ids)
return chroma_collection
@classmethod
def from_documents(
cls: Type[Chroma],
documents: List[Document],
embedding: Optional[Embeddings] = None,
ids: Optional[List[str]] = None,
collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME,
persist_directory: Optional[str] = None,
client_settings: Optional[chromadb.config.Settings] = None,
client: Optional[chromadb.Client] = None,
**kwargs: Any,
) -> Chroma:
"""Create a Chroma vectorstore from a list of documents.
If a persist_directory is specified, the collection will be persisted there.
Otherwise, the data will be ephemeral in-memory.
Args:
collection_name (str): Name of the collection to create.
persist_directory (Optional[str]): Directory to persist the collection.
ids (Optional[List[str]]): List of document IDs. Defaults to None.
documents (List[Document]): List of documents to add to the vectorstore.
embedding (Optional[Embeddings]): Embedding function. Defaults to None.
client_settings (Optional[chromadb.config.Settings]): Chroma client settings
Returns:
Chroma: Chroma vectorstore.
"""
texts = [doc.page_content for doc in documents]
metadatas = [doc.metadata for doc in documents]
return cls.from_texts(
texts=texts,
embedding=embedding,
metadatas=metadatas,
ids=ids,
collection_name=collection_name,
persist_directory=persist_directory,
client_settings=client_settings,
client=client,
)
def delete(self, ids: List[str]) -> None:
"""Delete by vector IDs.
Args:
ids: List of ids to delete.
"""
self._collection.delete(ids=ids)
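Taken together, a minimal usage sketch for this Chroma wrapper might look like the following. The texts, collection name, and persist directory are illustrative; it assumes the chromadb package and OpenAI credentials are available:

from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import Chroma

embeddings = OpenAIEmbeddings()
db = Chroma.from_texts(
    texts=["foo", "bar", "baz"],
    embedding=embeddings,
    collection_name="demo",           # illustrative name
    persist_directory="./chroma_db",  # illustrative path
)
db.persist()  # explicitly flush to disk; also runs when the object is destroyed

# MMR balances similarity to the query (lambda_mult -> 1) against
# diversity among the results (lambda_mult -> 0).
docs = db.max_marginal_relevance_search("foo", k=2, fetch_k=10, lambda_mult=0.5)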
Source code for langchain.vectorstores.matching_engine
https://api.python.langchain.com/en/latest/_modules/langchain/vectorstores/matching_engine.html
"""Vertex Matching Engine implementation of the vector store."""
from __future__ import annotations
import json
import logging
import time
import uuid
from typing import TYPE_CHECKING, Any, Iterable, List, Optional, Type
from langchain.docstore.document import Document
from langchain.embeddings import TensorflowHubEmbeddings
from langchain.embeddings.base import Embeddings
from langchain.vectorstores.base import VectorStore
if TYPE_CHECKING:
from google.cloud import storage
from google.cloud.aiplatform import MatchingEngineIndex, MatchingEngineIndexEndpoint
from google.oauth2.service_account import Credentials
logger = logging.getLogger()
class MatchingEngine(VectorStore):
"""Vertex Matching Engine implementation of the vector store.
While the embeddings are stored in the Matching Engine, the embedded
documents will be stored in GCS.
An existing Index and corresponding Endpoint are preconditions for
using this module.
See usage in docs/modules/indexes/vectorstores/examples/matchingengine.ipynb
Note that this implementation is mostly suited to read-heavy, real-time
use cases: while reading is a real-time operation, updating the index
takes close to one hour."""
def __init__(
self,
project_id: str,
index: MatchingEngineIndex,
endpoint: MatchingEngineIndexEndpoint,
embedding: Embeddings,
gcs_client: storage.Client,
gcs_bucket_name: str,
credentials: Optional[Credentials] = None,
):
"""Vertex Matching Engine implementation of the vector store.
While the embeddings are stored in the Matching Engine, the embedded
documents will be stored in GCS.
An existing Index and corresponding Endpoint are preconditions for
using this module.
See usage in
docs/modules/indexes/vectorstores/examples/matchingengine.ipynb.
Note that this implementation is mostly suited to read-heavy, real-time
use cases: while reading is a real-time operation, updating the index
takes close to one hour.
Attributes:
project_id: The GCS project id.
index: The created index class. See
:func:`MatchingEngine.from_components`.
endpoint: The created endpoint class. See
:func:`MatchingEngine.from_components`.
embedding: A :class:`Embeddings` that will be used for
embedding the text sent. If none is sent, then the
multilingual Tensorflow Universal Sentence Encoder will be used.
gcs_client: The GCS client.
gcs_bucket_name: The GCS bucket name.
credentials (Optional): Created GCP credentials.
"""
super().__init__()
self._validate_google_libraries_installation()
self.project_id = project_id
self.index = index
self.endpoint = endpoint
self.embedding = embedding
self.gcs_client = gcs_client
self.credentials = credentials
self.gcs_bucket_name = gcs_bucket_name
def _validate_google_libraries_installation(self) -> None:
"""Validates that Google libraries that are needed are installed."""
try:
from google.cloud import aiplatform, storage # noqa: F401
from google.oauth2 import service_account # noqa: F401
except ImportError:
raise ImportError(
"You must run `pip install --upgrade "
"google-cloud-aiplatform google-cloud-storage`"
"to use the MatchingEngine Vectorstore."
)
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
**kwargs: Any,
) -> List[str]:
"""Run more texts through the embeddings and add to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
kwargs: vectorstore specific parameters.
Returns:
List of ids from adding the texts into the vectorstore.
"""
logger.debug("Embedding documents.")
embeddings = self.embedding.embed_documents(list(texts))
jsons = []
ids = []
# Could be improved with async.
for embedding, text in zip(embeddings, texts):
id = str(uuid.uuid4())
ids.append(id)
jsons.append({"id": id, "embedding": embedding})
self._upload_to_gcs(text, f"documents/{id}")
logger.debug(f"Uploaded {len(ids)} documents to GCS.")
# Creating json lines from the embedded documents.
result_str = "\n".join([json.dumps(x) for x in jsons])
filename_prefix = f"indexes/{uuid.uuid4()}"
filename = f"{filename_prefix}/{time.time()}.json"
self._upload_to_gcs(result_str, filename)
logger.debug(
f"Uploaded updated json with embeddings to "
f"{self.gcs_bucket_name}/{filename}."
)
self.index = self.index.update_embeddings(
contents_delta_uri=f"gs://{self.gcs_bucket_name}/{filename_prefix}/"
)
logger.debug("Updated index with new configuration.")
return ids
def _upload_to_gcs(self, data: str, gcs_location: str) -> None:
"""Uploads data to gcs_location.
Args:
data: The data that will be stored.
gcs_location: The location where the data will be stored.
"""
bucket = self.gcs_client.get_bucket(self.gcs_bucket_name)
blob = bucket.blob(gcs_location)
blob.upload_from_string(data)
def similarity_search(
self, query: str, k: int = 4, **kwargs: Any
) -> List[Document]:
"""Return docs most similar to query.
Args:
query: The string that will be used to search for similar documents.
k: The amount of neighbors that will be retrieved.
Returns:
A list of k matching documents.
"""
logger.debug(f"Embedding query {query}.")
embedding_query = self.embedding.embed_documents([query])
response = self.endpoint.match(
deployed_index_id=self._get_index_id(),
queries=embedding_query,
num_neighbors=k,
)
if len(response) == 0:
return []
logger.debug(f"Found {len(response)} matches for the query {query}.")
results = []
# Only the first result set is used: `queries` accepts an array, but
# similarity_search receives a single query, so `match` always returns
# an array with exactly one element.
for doc in response[0]:
page_content = self._download_from_gcs(f"documents/{doc.id}")
results.append(Document(page_content=page_content))
logger.debug("Downloaded documents for query.")
return results
def _get_index_id(self) -> str:
"""Gets the correct index id for the endpoint.
Returns:
The id of the deployed index; raises ValueError if the index
is not deployed on the endpoint.
"""
for index in self.endpoint.deployed_indexes:
if index.index == self.index.resource_name:
return index.id
raise ValueError(
f"No index with id {self.index.resource_name} "
f"deployed on endpoint "
f"{self.endpoint.display_name}."
)
def _download_from_gcs(self, gcs_location: str) -> str:
"""Downloads from GCS in text format.
Args:
gcs_location: The location where the file is located.
Returns:
The string contents of the file.
"""
bucket = self.gcs_client.get_bucket(self.gcs_bucket_name)
blob = bucket.blob(gcs_location)
return blob.download_as_string()
@classmethod
def from_texts(
cls: Type["MatchingEngine"],
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
**kwargs: Any,
) -> "MatchingEngine":
"""Use from components instead."""
raise NotImplementedError(
"This method is not implemented. Instead, you should initialize the class"
" with `MatchingEngine.from_components(...)` and then call "
"`add_texts`"
)
@classmethod
def from_components(
cls: Type["MatchingEngine"],
project_id: str,
region: str,
gcs_bucket_name: str,
index_id: str,
endpoint_id: str,
credentials_path: Optional[str] = None,
embedding: Optional[Embeddings] = None,
) -> "MatchingEngine":
"""Takes the object creation out of the constructor.
Args:
project_id: The GCP project id.
region: The default location making the API calls. It must have
the same location as the GCS bucket and must be regional.
gcs_bucket_name: The location where the vectors will be stored in
order for the index to be created.
index_id: The id of the created index.
endpoint_id: The id of the created endpoint.
credentials_path: (Optional) The path of the Google credentials on
the local file system.
embedding: The :class:`Embeddings` that will be used for
embedding the texts.
Returns:
A configured MatchingEngine with the texts added to the index.
"""
gcs_bucket_name = cls._validate_gcs_bucket(gcs_bucket_name)
credentials = cls._create_credentials_from_file(credentials_path)
index = cls._create_index_by_id(index_id, project_id, region, credentials)
endpoint = cls._create_endpoint_by_id(
endpoint_id, project_id, region, credentials
)
gcs_client = cls._get_gcs_client(credentials, project_id)
cls._init_aiplatform(project_id, region, gcs_bucket_name, credentials)
return cls(
project_id=project_id,
index=index,
endpoint=endpoint,
embedding=embedding or cls._get_default_embeddings(),
gcs_client=gcs_client,
credentials=credentials,
gcs_bucket_name=gcs_bucket_name,
)
@classmethod
def _validate_gcs_bucket(cls, gcs_bucket_name: str) -> str:
"""Validates the gcs_bucket_name as a bucket name.
Args:
gcs_bucket_name: The received bucket uri.
Returns:
A valid bucket name; raises ValueError if a full path is provided.
"""
gcs_bucket_name = gcs_bucket_name.replace("gs://", "")
if "/" in gcs_bucket_name:
raise ValueError(
f"The argument gcs_bucket_name should only be "
f"the bucket name. Received {gcs_bucket_name}"
)
return gcs_bucket_name
@classmethod
def _create_credentials_from_file(
cls, json_credentials_path: Optional[str]
) -> Optional[Credentials]:
"""Creates credentials for GCP.
Args:
json_credentials_path: The path on the file system where the
credentials are stored.
Returns:
An optional of Credentials or None, in which case the default
will be used.
"""
from google.oauth2 import service_account
credentials = None
if json_credentials_path is not None:
credentials = service_account.Credentials.from_service_account_file(
json_credentials_path
)
return credentials
@classmethod
def _create_index_by_id(
cls, index_id: str, project_id: str, region: str, credentials: "Credentials"
) -> MatchingEngineIndex:
"""Creates a MatchingEngineIndex object by id.
Args:
index_id: The created index id.
project_id: The project to retrieve index from.
region: Location to retrieve index from.
credentials: GCS credentials.
Returns:
A configured MatchingEngineIndex.
"""
from google.cloud import aiplatform
logger.debug(f"Creating matching engine index with id {index_id}.")
return aiplatform.MatchingEngineIndex(
index_name=index_id,
project=project_id,
location=region,
credentials=credentials,
)
@classmethod
def _create_endpoint_by_id(
cls, endpoint_id: str, project_id: str, region: str, credentials: "Credentials"
) -> MatchingEngineIndexEndpoint:
"""Creates a MatchingEngineIndexEndpoint object by id.
Args:
endpoint_id: The created endpoint id.
project_id: The project to retrieve index from.
region: Location to retrieve index from.
credentials: GCS credentials.
Returns:
A configured MatchingEngineIndexEndpoint.
"""
from google.cloud import aiplatform
logger.debug(f"Creating endpoint with id {endpoint_id}.")
return aiplatform.MatchingEngineIndexEndpoint(
index_endpoint_name=endpoint_id,
project=project_id,
location=region,
credentials=credentials,
)
@classmethod
def _get_gcs_client(
cls, credentials: "Credentials", project_id: str
) -> "storage.Client":
"""Lazily creates a GCS client.
Returns:
A configured GCS client.
"""
from google.cloud import storage
return storage.Client(credentials=credentials, project=project_id)
@classmethod
def _init_aiplatform(
cls,
project_id: str,
region: str,
gcs_bucket_name: str,
credentials: "Credentials",
) -> None:
"""Configures the aiplatform library.
Args:
project_id: The GCP project id.
region: The default location making the API calls. It must have
the same location as the GCS bucket and must be regional.
gcs_bucket_name: GCS staging location.
credentials: The GCS Credentials object.
"""
from google.cloud import aiplatform
logger.debug(
f"Initializing AI Platform for project {project_id} on "
f"{region} and for {gcs_bucket_name}."
)
aiplatform.init(
project=project_id,
location=region,
staging_bucket=gcs_bucket_name,
credentials=credentials,
)
@classmethod
def _get_default_embeddings(cls) -> TensorflowHubEmbeddings:
"""This function returns the default embedding.
Returns:
Default TensorflowHubEmbeddings to use.
"""
return TensorflowHubEmbeddings()
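A minimal usage sketch under the preconditions stated above (an existing index and endpoint); every id, region, and path below is a placeholder:

from langchain.vectorstores import MatchingEngine

engine = MatchingEngine.from_components(
    project_id="my-project",                   # placeholder
    region="us-central1",                      # must match the bucket's region
    gcs_bucket_name="my-bucket",               # bucket name only, no gs:// prefix
    index_id="my-index-id",                    # placeholder
    endpoint_id="my-endpoint-id",              # placeholder
    credentials_path="service_account.json",   # optional
)
engine.add_texts(["doc one", "doc two"])  # triggers an index update (~1 hour)
docs = engine.similarity_search("doc", k=4)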
Source code for langchain.vectorstores.atlas
https://api.python.langchain.com/en/latest/_modules/langchain/vectorstores/atlas.html
"""Wrapper around Atlas by Nomic."""
from __future__ import annotations
import logging
import uuid
from typing import Any, Iterable, List, Optional, Type
import numpy as np
from langchain.docstore.document import Document
from langchain.embeddings.base import Embeddings
from langchain.vectorstores.base import VectorStore
logger = logging.getLogger(__name__)
class AtlasDB(VectorStore):
"""Wrapper around Atlas: Nomic's neural database and rhizomatic instrument.
To use, you should have the ``nomic`` python package installed.
Example:
.. code-block:: python
from langchain.vectorstores import AtlasDB
from langchain.embeddings.openai import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
vectorstore = AtlasDB("my_project", embeddings.embed_query)
"""
_ATLAS_DEFAULT_ID_FIELD = "atlas_id"
def __init__(
self,
name: str,
embedding_function: Optional[Embeddings] = None,
api_key: Optional[str] = None,
description: str = "A description for your project",
is_public: bool = True,
reset_project_if_exists: bool = False,
) -> None:
"""
Initialize the Atlas Client
Args:
name (str): The name of your project. If the project already exists,
it will be loaded.
embedding_function (Optional[Callable]): An optional function used for
embedding your data. If None, data will be embedded with
Nomic's embed model.
api_key (str): Your nomic API key
description (str): A description for your project.
is_public (bool): Whether your project is publicly accessible.
True by default.
reset_project_if_exists (bool): Whether to reset this project if it
already exists. Default False.
Generally useful during development and testing.
"""
try:
import nomic
from nomic import AtlasProject
except ImportError:
raise ValueError(
"Could not import nomic python package. "
"Please install it with `pip install nomic`."
)
if api_key is None:
raise ValueError("No API key provided. Sign up at atlas.nomic.ai!")
nomic.login(api_key)
self._embedding_function = embedding_function
modality = "text"
if self._embedding_function is not None:
modality = "embedding"
# Check if the project exists, create it if not
self.project = AtlasProject(
name=name,
description=description,
modality=modality,
is_public=is_public,
reset_project_if_exists=reset_project_if_exists,
unique_id_field=AtlasDB._ATLAS_DEFAULT_ID_FIELD,
)
self.project._latest_project_state()
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
refresh: bool = True,
**kwargs: Any,
) -> List[str]:
"""Run more texts through the embeddings and add to the vectorstore.
Args:
texts (Iterable[str]): Texts to add to the vectorstore.
metadatas (Optional[List[dict]], optional): Optional list of metadatas.
ids (Optional[List[str]]): An optional list of ids.
refresh(bool): Whether or not to refresh indices with the updated data.
Default True.
Returns:
List[str]: List of IDs of the added texts.
"""
if (
metadatas is not None
and len(metadatas) > 0
and "text" in metadatas[0].keys()
):
raise ValueError("Cannot accept key text in metadata!")
texts = list(texts)
if ids is None:
ids = [str(uuid.uuid1()) for _ in texts]
# Embedding upload case
if self._embedding_function is not None:
_embeddings = self._embedding_function.embed_documents(texts)
embeddings = np.stack(_embeddings)
if metadatas is None:
data = [
{AtlasDB._ATLAS_DEFAULT_ID_FIELD: ids[i], "text": texts[i]}
for i, _ in enumerate(texts)
]
else:
for i in range(len(metadatas)):
metadatas[i][AtlasDB._ATLAS_DEFAULT_ID_FIELD] = ids[i]
metadatas[i]["text"] = texts[i]
data = metadatas
self.project._validate_map_data_inputs(
[], id_field=AtlasDB._ATLAS_DEFAULT_ID_FIELD, data=data
)
with self.project.wait_for_project_lock():
self.project.add_embeddings(embeddings=embeddings, data=data)
# Text upload case
else:
if metadatas is None:
data = [
{"text": text, AtlasDB._ATLAS_DEFAULT_ID_FIELD: ids[i]}
for i, text in enumerate(texts)
]
else:
for i, text in enumerate(texts):
metadatas[i]["text"] = texts
metadatas[i][AtlasDB._ATLAS_DEFAULT_ID_FIELD] = ids[i]
data = metadatas
self.project._validate_map_data_inputs(
[], id_field=AtlasDB._ATLAS_DEFAULT_ID_FIELD, data=data
)
with self.project.wait_for_project_lock():
self.project.add_text(data)
if refresh:
if len(self.project.indices) > 0:
with self.project.wait_for_project_lock():
self.project.rebuild_maps()
return ids
def create_index(self, **kwargs: Any) -> Any:
"""Creates an index in your project.
See
https://docs.nomic.ai/atlas_api.html#nomic.project.AtlasProject.create_index
for full detail.
"""
with self.project.wait_for_project_lock():
return self.project.create_index(**kwargs)
def similarity_search(
self,
query: str,
k: int = 4,
**kwargs: Any,
) -> List[Document]:
"""Run similarity search with AtlasDB
Args:
query (str): Query text to search for.
k (int): Number of results to return. Defaults to 4.
Returns:
List[Document]: List of documents most similar to the query text.
"""
if self._embedding_function is None:
raise NotImplementedError(
"AtlasDB requires an embedding_function for text similarity search!"
)
_embedding = self._embedding_function.embed_documents([query])[0]
embedding = np.array(_embedding).reshape(1, -1)
with self.project.wait_for_project_lock():
neighbors, _ = self.project.projections[0].vector_search(
queries=embedding, k=k
)
datas = self.project.get_data(ids=neighbors[0])
docs = [
Document(page_content=datas[i]["text"], metadata=datas[i])
for i, neighbor in enumerate(neighbors[0])
]
return docs
@classmethod
def from_texts(
cls: Type[AtlasDB],
texts: List[str],
embedding: Optional[Embeddings] = None,
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
name: Optional[str] = None,
api_key: Optional[str] = None,
description: str = "A description for your project",
is_public: bool = True,
reset_project_if_exists: bool = False,
index_kwargs: Optional[dict] = None,
**kwargs: Any,
) -> AtlasDB:
"""Create an AtlasDB vectorstore from a raw documents.
Args:
texts (List[str]): The list of texts to ingest.
name (str): Name of the project to create.
api_key (str): Your nomic API key.
embedding (Optional[Embeddings]): Embedding function. Defaults to None.
metadatas (Optional[List[dict]]): List of metadatas. Defaults to None.
ids (Optional[List[str]]): Optional list of document IDs. If None,
ids will be auto created
description (str): A description for your project.
is_public (bool): Whether your project is publicly accessible.
True by default.
reset_project_if_exists (bool): Whether to reset this project if it
already exists. Default False.
Generally useful during development and testing.
index_kwargs (Optional[dict]): Dict of kwargs for index creation.
See https://docs.nomic.ai/atlas_api.html
Returns:
AtlasDB: Nomic's neural database and finest rhizomatic instrument
"""
if name is None or api_key is None:
raise ValueError("`name` and `api_key` cannot be None.")
# Inject relevant kwargs
all_index_kwargs = {"name": name + "_index", "indexed_field": "text"}
if index_kwargs is not None:
for k, v in index_kwargs.items():
all_index_kwargs[k] = v
# Build project
atlasDB = cls(
name,
embedding_function=embedding,
api_key=api_key,
description="A description for your project",
is_public=is_public,
reset_project_if_exists=reset_project_if_exists,
)
with atlasDB.project.wait_for_project_lock():
atlasDB.add_texts(texts=texts, metadatas=metadatas, ids=ids)
atlasDB.create_index(**all_index_kwargs)
return atlasDB
@classmethod
def from_documents(
cls: Type[AtlasDB],
documents: List[Document],
embedding: Optional[Embeddings] = None,
ids: Optional[List[str]] = None,
name: Optional[str] = None,
api_key: Optional[str] = None,
persist_directory: Optional[str] = None,
description: str = "A description for your project",
is_public: bool = True,
reset_project_if_exists: bool = False,
index_kwargs: Optional[dict] = None,
**kwargs: Any,
) -> AtlasDB:
"""Create an AtlasDB vectorstore from a list of documents.
Args:
name (str): Name of the collection to create.
api_key (str): Your nomic API key.
documents (List[Document]): List of documents to add to the vectorstore.
embedding (Optional[Embeddings]): Embedding function. Defaults to None.
ids (Optional[List[str]]): Optional list of document IDs. If None,
ids will be auto created
description (str): A description for your project.
is_public (bool): Whether your project is publicly accessible.
True by default.
reset_project_if_exists (bool): Whether to reset this project if
it already exists. Default False.
Generally useful during development and testing.
index_kwargs (Optional[dict]): Dict of kwargs for index creation.
See https://docs.nomic.ai/atlas_api.html
Returns:
AtlasDB: Nomic's neural database and finest rhizomatic instrument
"""
if name is None or api_key is None:
raise ValueError("`name` and `api_key` cannot be None.")
texts = [doc.page_content for doc in documents]
metadatas = [doc.metadata for doc in documents]
return cls.from_texts(
name=name,
api_key=api_key,
texts=texts,
embedding=embedding,
metadatas=metadatas,
ids=ids,
description=description,
is_public=is_public,
reset_project_if_exists=reset_project_if_exists,
index_kwargs=index_kwargs,
)
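A minimal usage sketch for this wrapper (the project name and API key are placeholders; requires the nomic package):

from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import AtlasDB

db = AtlasDB.from_texts(
    texts=["foo", "bar"],
    embedding=OpenAIEmbeddings(),  # or None to use Nomic's embed model
    name="my_project",             # placeholder
    api_key="NOMIC_API_KEY",       # placeholder
)
docs = db.similarity_search("foo", k=2)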
Source code for langchain.vectorstores.supabase
https://api.python.langchain.com/en/latest/_modules/langchain/vectorstores/supabase.html
from __future__ import annotations
import uuid
from itertools import repeat
from typing import (
TYPE_CHECKING,
Any,
Iterable,
List,
Optional,
Tuple,
Type,
Union,
)
import numpy as np
from langchain.docstore.document import Document
from langchain.embeddings.base import Embeddings
from langchain.vectorstores.base import VectorStore
from langchain.vectorstores.utils import maximal_marginal_relevance
if TYPE_CHECKING:
import supabase
class SupabaseVectorStore(VectorStore):
"""VectorStore for a Supabase postgres database. Assumes you have the `pgvector`
extension installed and a `match_documents` (or similar) function. For more details:
https://js.langchain.com/docs/modules/indexes/vector_stores/integrations/supabase
You can implement your own `match_documents` function in order to limit the search
space to a subset of documents based on your own authorization or business logic.
Note that the Supabase Python client does not yet support async operations.
If you'd like to use `max_marginal_relevance_search`, please review the instructions
below on modifying the `match_documents` function to return matched embeddings.
"""
_client: supabase.client.Client
# This is the embedding function. Don't confuse with the embedding vectors.
# We should perhaps rename the underlying Embedding base class to EmbeddingFunction
# or something
_embedding: Embeddings
table_name: str
query_name: str
def __init__(
self,
client: supabase.client.Client,
embedding: Embeddings,
table_name: str,
query_name: Union[str, None] = None,
) -> None:
"""Initialize with supabase client."""
try:
import supabase # noqa: F401
except ImportError:
raise ValueError(
"Could not import supabase python package. "
"Please install it with `pip install supabase`."
)
self._client = client
self._embedding: Embeddings = embedding
self.table_name = table_name or "documents"
self.query_name = query_name or "match_documents"
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict[Any, Any]]] = None,
ids: Optional[List[str]] = None,
**kwargs: Any,
) -> List[str]:
ids = ids or [str(uuid.uuid4()) for _ in texts]
docs = self._texts_to_documents(texts, metadatas)
vectors = self._embedding.embed_documents(list(texts))
return self.add_vectors(vectors, docs, ids)
@classmethod
def from_texts(
cls: Type["SupabaseVectorStore"],
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
client: Optional[supabase.client.Client] = None,
table_name: Optional[str] = "documents",
query_name: Union[str, None] = "match_documents",
ids: Optional[List[str]] = None,
**kwargs: Any,
) -> "SupabaseVectorStore":
"""Return VectorStore initialized from texts and embeddings."""
if not client:
raise ValueError("Supabase client is required.")
if not table_name:
raise ValueError("Supabase document table_name is required.")
embeddings = embedding.embed_documents(texts)
ids = [str(uuid.uuid4()) for _ in texts]
docs = cls._texts_to_documents(texts, metadatas)
_ids = cls._add_vectors(client, table_name, embeddings, docs, ids)
return cls(
client=client,
embedding=embedding,
table_name=table_name,
query_name=query_name,
)
def add_vectors(
self,
vectors: List[List[float]],
documents: List[Document],
ids: List[str],
) -> List[str]:
return self._add_vectors(self._client, self.table_name, vectors, documents, ids)
def similarity_search(
self, query: str, k: int = 4, **kwargs: Any
) -> List[Document]:
vectors = self._embedding.embed_documents([query])
return self.similarity_search_by_vector(vectors[0], k)
def similarity_search_by_vector(
self, embedding: List[float], k: int = 4, **kwargs: Any
) -> List[Document]:
result = self.similarity_search_by_vector_with_relevance_scores(embedding, k)
documents = [doc for doc, _ in result]
return documents
def similarity_search_with_relevance_scores(
self, query: str, k: int = 4, **kwargs: Any
) -> List[Tuple[Document, float]]:
vectors = self._embedding.embed_documents([query])
return self.similarity_search_by_vector_with_relevance_scores(vectors[0], k)
def similarity_search_by_vector_with_relevance_scores(
self, query: List[float], k: int
) -> List[Tuple[Document, float]]:
match_documents_params = dict(query_embedding=query, match_count=k)
res = self._client.rpc(self.query_name, match_documents_params).execute()
match_result = [
(
Document(
metadata=search.get("metadata", {}), # type: ignore
page_content=search.get("content", ""),
),
search.get("similarity", 0.0),
)
for search in res.data
if search.get("content")
]
return match_result
def similarity_search_by_vector_returning_embeddings(
self, query: List[float], k: int
) -> List[Tuple[Document, float, np.ndarray[np.float32, Any]]]:
match_documents_params = dict(query_embedding=query, match_count=k)
res = self._client.rpc(self.query_name, match_documents_params).execute()
match_result = [
(
Document(
metadata=search.get("metadata", {}), # type: ignore
page_content=search.get("content", ""),
),
search.get("similarity", 0.0),
# Supabase returns a vector type as its string representation (!).
# This is a hack to convert that string into a numpy array.
np.fromstring(
search.get("embedding", "").strip("[]"), np.float32, sep=","
),
)
for search in res.data
if search.get("content")
]
return match_result
@staticmethod
def _texts_to_documents(
texts: Iterable[str],
metadatas: Optional[Iterable[dict[Any, Any]]] = None,
) -> List[Document]:
"""Return list of Documents from list of texts and metadatas."""
if metadatas is None:
metadatas = repeat({})
docs = [
Document(page_content=text, metadata=metadata)
for text, metadata in zip(texts, metadatas)
]
return docs
@staticmethod
def _add_vectors(
client: supabase.client.Client,
table_name: str,
vectors: List[List[float]],
documents: List[Document],
ids: List[str],
) -> List[str]:
"""Add vectors to Supabase table."""
rows: List[dict[str, Any]] = [
{
"id": ids[idx],
"content": documents[idx].page_content,
"embedding": embedding,
"metadata": documents[idx].metadata, # type: ignore
}
for idx, embedding in enumerate(vectors)
]
# According to the SupabaseVectorStore JS implementation, the best chunk size
# is 500
chunk_size = 500
id_list: List[str] = []
for i in range(0, len(rows), chunk_size):
chunk = rows[i : i + chunk_size]
result = client.from_(table_name).upsert(chunk).execute() # type: ignore
if len(result.data) == 0:
raise Exception("Error inserting: No rows added")
# VectorStore.add_vectors returns ids as strings
ids = [str(i.get("id")) for i in result.data if i.get("id")]
id_list.extend(ids)
return id_list
def max_marginal_relevance_search_by_vector(
self,
embedding: List[float],
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
**kwargs: Any,
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
Returns:
List of Documents selected by maximal marginal relevance.
"""
result = self.similarity_search_by_vector_returning_embeddings(
embedding, fetch_k
)
matched_documents = [doc_tuple[0] for doc_tuple in result]
matched_embeddings = [doc_tuple[2] for doc_tuple in result]
mmr_selected = maximal_marginal_relevance(
np.array([embedding], dtype=np.float32),
matched_embeddings,
k=k,
lambda_mult=lambda_mult,
)
filtered_documents = [matched_documents[i] for i in mmr_selected]
return filtered_documents
def max_marginal_relevance_search(
self,
query: str,
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
**kwargs: Any,
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
Returns:
List of Documents selected by maximal marginal relevance.
`max_marginal_relevance_search` requires that `query_name` returns matched
embeddings alongside the match documents. The following function
demonstrates how to do this:
```sql
CREATE FUNCTION match_documents_embeddings(query_embedding vector(1536),
match_count int)
RETURNS TABLE(
id bigint,
content text,
metadata jsonb,
embedding vector(1536),
similarity float)
LANGUAGE plpgsql
AS $$
#variable_conflict use_column
BEGIN
RETURN query
SELECT
id,
content,
metadata,
embedding,
1 - (docstore.embedding <=> query_embedding) AS similarity
FROM
docstore
ORDER BY
docstore.embedding <=> query_embedding
LIMIT match_count;
END;
$$;
```
"""
embedding = self._embedding.embed_documents([query])
docs = self.max_marginal_relevance_search_by_vector(
embedding[0], k, fetch_k, lambda_mult=lambda_mult
)
return docs
def delete(self, ids: List[str]) -> None:
"""Delete by vector IDs.
Args:
ids: List of ids to delete.
"""
rows: List[dict[str, Any]] = [
{
"id": id,
}
for id in ids
]
# TODO: Check if this can be done in bulk
for row in rows:
self._client.from_(self.table_name).delete().eq("id", row["id"]).execute()
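A minimal usage sketch (the URL and key come from your Supabase project; the table and SQL function are assumed to already exist, per the setup linked in the class docstring):

import os

from supabase import create_client
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import SupabaseVectorStore

client = create_client(
    os.environ["SUPABASE_URL"], os.environ["SUPABASE_SERVICE_KEY"]
)
store = SupabaseVectorStore.from_texts(
    texts=["foo", "bar"],
    embedding=OpenAIEmbeddings(),
    client=client,
    table_name="documents",
    # Point query_name at the embeddings-returning SQL function shown above
    # if you want max_marginal_relevance_search to work:
    query_name="match_documents_embeddings",
)
docs = store.similarity_search("foo", k=2)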
Source code for langchain.vectorstores.awadb
https://api.python.langchain.com/en/latest/_modules/langchain/vectorstores/awadb.html
"""Wrapper around AwaDB for embedding vectors"""
from __future__ import annotations
import logging
import uuid
from typing import TYPE_CHECKING, Any, Iterable, List, Optional, Tuple, Type
from langchain.docstore.document import Document
from langchain.embeddings.base import Embeddings
from langchain.vectorstores.base import VectorStore
# from pydantic import BaseModel, Field, root_validator
if TYPE_CHECKING:
import awadb
logger = logging.getLogger()
DEFAULT_TOPN = 4
class AwaDB(VectorStore):
"""Interface implemented by AwaDB vector stores."""
_DEFAULT_TABLE_NAME = "langchain_awadb"
def __init__(
self,
table_name: str = _DEFAULT_TABLE_NAME,
embedding: Optional[Embeddings] = None,
log_and_data_dir: Optional[str] = None,
client: Optional[awadb.Client] = None,
) -> None:
"""Initialize with AwaDB client."""
try:
import awadb
except ImportError:
raise ValueError(
"Could not import awadb python package. "
"Please install it with `pip install awadb`."
)
if client is not None:
self.awadb_client = client
else:
if log_and_data_dir is not None:
self.awadb_client = awadb.Client(log_and_data_dir)
else:
self.awadb_client = awadb.Client()
if table_name == self._DEFAULT_TABLE_NAME:
table_name += "_"
table_name += str(uuid.uuid4()).split("-")[-1]
self.awadb_client.Create(table_name)
self.table2embeddings: dict[str, Embeddings] = {}
if embedding is not None:
self.table2embeddings[table_name] = embedding
self.using_table_name = table_name
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
is_duplicate_texts: Optional[bool] = None,
**kwargs: Any,
) -> List[str]:
"""Run more texts through the embeddings and add to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
is_duplicate_texts: Optional flag for whether duplicate texts are allowed.
kwargs: vectorstore specific parameters.
Returns:
List of ids from adding the texts into the vectorstore.
"""
if self.awadb_client is None:
raise ValueError("AwaDB client is None!!!")
embeddings = None
if self.using_table_name in self.table2embeddings:
embeddings = self.table2embeddings[self.using_table_name].embed_documents(
list(texts)
)
return self.awadb_client.AddTexts(
"embedding_text",
"text_embedding",
texts,
embeddings,
metadatas,
is_duplicate_texts,
)
def load_local(
self,
table_name: str,
**kwargs: Any,
) -> bool:
if self.awadb_client is None:
raise ValueError("AwaDB client is None!!!")
return self.awadb_client.Load(table_name)
def similarity_search(
self,
query: str,
k: int = DEFAULT_TOPN,
**kwargs: Any,
) -> List[Document]:
"""Return docs most similar to query."""
if self.awadb_client is None:
raise ValueError("AwaDB client is None!!!")
embedding = None
if self.using_table_name in self.table2embeddings:
embedding = self.table2embeddings[self.using_table_name].embed_query(query)
else:
from awadb import llm_embedding
llm = llm_embedding.LLMEmbedding()
embedding = llm.Embedding(query)
return self.similarity_search_by_vector(embedding, k)
def similarity_search_with_score(
self,
query: str,
k: int = DEFAULT_TOPN,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""Return docs and relevance scores, normalized on a scale from 0 to 1.
0 is dissimilar, 1 is most similar.
"""
if self.awadb_client is None:
raise ValueError("AwaDB client is None!!!")
embedding = None
if self.using_table_name in self.table2embeddings:
embedding = self.table2embeddings[self.using_table_name].embed_query(query)
else:
from awadb import llm_embedding
llm = llm_embedding.LLMEmbedding()
embedding = llm.Embedding(query)
results: List[Tuple[Document, float]] = []
scores: List[float] = []
retrieval_docs = self.similarity_search_by_vector(embedding, k, scores)
L2_Norm = 0.0
for score in scores:
L2_Norm = L2_Norm + score * score
L2_Norm = pow(L2_Norm, 0.5)
doc_no = 0
for doc in retrieval_docs:
doc_tuple = (doc, 1 - (scores[doc_no] / L2_Norm))
results.append(doc_tuple)
doc_no = doc_no + 1
return results
def similarity_search_with_relevance_scores(
self,
query: str,
k: int = DEFAULT_TOPN,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""Return docs and relevance scores, normalized on a scale from 0 to 1.
0 is dissimilar, 1 is most similar.
"""
if self.awadb_client is None:
raise ValueError("AwaDB client is None!!!")
embedding = None
if self.using_table_name in self.table2embeddings:
embedding = self.table2embeddings[self.using_table_name].embed_query(query)
show_results = self.awadb_client.Search(embedding, k)
results: List[Tuple[Document, float]] = []
if len(show_results) == 0:
return results
scores: List[float] = []
retrieval_docs = self.similarity_search_by_vector(embedding, k, scores)
L2_Norm = 0.0
for score in scores:
L2_Norm = L2_Norm + score * score
L2_Norm = pow(L2_Norm, 0.5)
doc_no = 0
for doc in retrieval_docs:
doc_tuple = (doc, 1 - scores[doc_no] / L2_Norm)
results.append(doc_tuple)
doc_no = doc_no + 1
return results
def similarity_search_by_vector(
self,
embedding: Optional[List[float]] = None,
k: int = DEFAULT_TOPN,
scores: Optional[list] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs most similar to embedding vector.
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
Returns:
List of Documents most similar to the query vector.
"""
if self.awadb_client is None:
raise ValueError("AwaDB client is None!!!")
results: List[Document] = []
if embedding is None:
return results
show_results = self.awadb_client.Search(embedding, k)
if len(show_results) == 0:
return results
for item_detail in show_results[0]["ResultItems"]:
content = ""
meta_data = {}
for item_key in item_detail:
if (
item_key == "Field@0"
and self.using_table_name in self.table2embeddings
): # text for the document
content = item_detail[item_key]
elif item_key == "embedding_text":
content = item_detail[item_key]
elif (
item_key == "Field@1" or item_key == "text_embedding"
): # embedding field for the document
continue
elif item_key == "score": # L2 distance
if scores is not None:
score = item_detail[item_key]
scores.append(score)
else:
meta_data[item_key] = item_detail[item_key]
results.append(Document(page_content=content, metadata=meta_data))
return results
def create_table(
self,
table_name: str,
**kwargs: Any,
) -> bool:
"""Create a new table."""
if self.awadb_client is None:
return False
ret = self.awadb_client.Create(table_name)
if ret:
self.using_table_name = table_name
return ret
def use(
self,
table_name: str,
**kwargs: Any,
) -> bool:
"""Use the specified table. Don't know the tables, please invoke list_tables."""
if self.awadb_client is None:
return False
ret = self.awadb_client.Use(table_name)
if ret:
self.using_table_name = table_name
return ret
def list_tables(
self,
**kwargs: Any,
) -> List[str]:
"""List all the tables created by the client."""
if self.awadb_client is None:
return []
return self.awadb_client.ListAllTables()
def get_current_table(
self,
**kwargs: Any,
) -> str:
"""Get the current table."""
return self.using_table_name
@classmethod
def from_texts(
cls: Type[AwaDB],
texts: List[str],
embedding: Optional[Embeddings] = None,
metadatas: Optional[List[dict]] = None,
table_name: str = _DEFAULT_TABLE_NAME,
log_and_data_dir: Optional[str] = None,
client: Optional[awadb.Client] = None,
**kwargs: Any,
) -> AwaDB:
"""Create an AwaDB vectorstore from a raw documents.
Args:
texts (List[str]): List of texts to add to the table.
embedding (Optional[Embeddings]): Embedding function. Defaults to None.
metadatas (Optional[List[dict]]): List of metadatas. Defaults to None.
table_name (str): Name of the table to create.
log_and_data_dir (Optional[str]): Directory of logging and persistence.
client (Optional[awadb.Client]): AwaDB client
Returns:
AwaDB: AwaDB vectorstore.
"""
awadb_client = cls(
table_name=table_name,
embedding=embedding,
log_and_data_dir=log_and_data_dir,
client=client,
)
awadb_client.add_texts(texts=texts, metadatas=metadatas)
return awadb_client
@classmethod
def from_documents(
cls: Type[AwaDB],
documents: List[Document],
embedding: Optional[Embeddings] = None,
table_name: str = _DEFAULT_TABLE_NAME,
log_and_data_dir: Optional[str] = None,
client: Optional[awadb.Client] = None,
**kwargs: Any,
) -> AwaDB:
"""Create an AwaDB vectorstore from a list of documents.
If a log_and_data_dir is specified, the table will be persisted there.
Args:
documents (List[Document]): List of documents to add to the vectorstore.
embedding (Optional[Embeddings]): Embedding function. Defaults to None.
table_name (str): Name of the table to create.
log_and_data_dir (Optional[str]): Directory to persist the table.
client (Optional[awadb.Client]): AwaDB client
Returns:
AwaDB: AwaDB vectorstore.
"""
texts = [doc.page_content for doc in documents]
metadatas = [doc.metadata for doc in documents]
return cls.from_texts(
texts=texts,
embedding=embedding,
metadatas=metadatas,
table_name=table_name,
log_and_data_dir=log_and_data_dir,
client=client,
)
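A minimal usage sketch (the table name and data directory are illustrative; requires the awadb package):

from langchain.vectorstores import AwaDB

db = AwaDB.from_texts(
    texts=["foo", "bar"],
    embedding=None,                     # falls back to AwaDB's own embedding model
    table_name="langchain_awadb_demo",  # illustrative
    log_and_data_dir="./awadb_data",    # optional persistence directory
)
docs = db.similarity_search("foo", k=4)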
Source code for langchain.vectorstores.elastic_vector_search
https://api.python.langchain.com/en/latest/_modules/langchain/vectorstores/elastic_vector_search.html
"""Wrapper around Elasticsearch vector database."""
from __future__ import annotations
import uuid
from abc import ABC
from typing import (
TYPE_CHECKING,
Any,
Dict,
Iterable,
List,
Mapping,
Optional,
Tuple,
Union,
)
from langchain.docstore.document import Document
from langchain.embeddings.base import Embeddings
from langchain.utils import get_from_env
from langchain.vectorstores.base import VectorStore
if TYPE_CHECKING:
from elasticsearch import Elasticsearch
def _default_text_mapping(dim: int) -> Dict:
return {
"properties": {
"text": {"type": "text"},
"vector": {"type": "dense_vector", "dims": dim},
}
}
def _default_script_query(query_vector: List[float], filter: Optional[dict]) -> Dict:
if filter:
((key, value),) = filter.items()
filter = {"match": {f"metadata.{key}.keyword": f"{value}"}}
else:
filter = {"match_all": {}}
return {
"script_score": {
"query": filter,
"script": {
"source": "cosineSimilarity(params.query_vector, 'vector') + 1.0",
"params": {"query_vector": query_vector},
},
}
}
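For reference, here is a sketch of the dict this helper produces when a single-key filter such as {"color": "red"} is supplied (the vector values are illustrative):

# _default_script_query(query_vector=[0.1, 0.2], filter={"color": "red"}) returns:
{
    "script_score": {
        "query": {"match": {"metadata.color.keyword": "red"}},
        "script": {
            "source": "cosineSimilarity(params.query_vector, 'vector') + 1.0",
            "params": {"query_vector": [0.1, 0.2]},
        },
    }
}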
# ElasticVectorSearch is a concrete implementation of the abstract base class
# VectorStore, which defines a common interface for all vector database
# implementations. By inheriting from the ABC class, ElasticVectorSearch can be
# defined as an abstract base class itself, allowing the creation of subclasses with
# their own specific implementations. If you plan to subclass ElasticVectorSearch,
# you can inherit from it and define your own implementation of the necessary methods
# and attributes.
class ElasticVectorSearch(VectorStore, ABC):
"""Wrapper around Elasticsearch as a vector database.
To connect to an Elasticsearch instance that does not require
login credentials, pass the Elasticsearch URL and index name along with the
embedding object to the constructor.
Example:
.. code-block:: python
from langchain import ElasticVectorSearch
from langchain.embeddings import OpenAIEmbeddings
embedding = OpenAIEmbeddings()
elastic_vector_search = ElasticVectorSearch(
elasticsearch_url="http://localhost:9200",
index_name="test_index",
embedding=embedding
)
To connect to an Elasticsearch instance that requires login credentials,
including Elastic Cloud, use the Elasticsearch URL format
https://username:password@es_host:9243. For example, to connect to Elastic
Cloud, create the Elasticsearch URL with the required authentication details and
pass it to the ElasticVectorSearch constructor as the named parameter
elasticsearch_url.
You can obtain your Elastic Cloud URL and login credentials by logging in to the
Elastic Cloud console at https://cloud.elastic.co, selecting your deployment, and
navigating to the "Deployments" page.
To obtain your Elastic Cloud password for the default "elastic" user:
1. Log in to the Elastic Cloud console at https://cloud.elastic.co
2. Go to "Security" > "Users"
3. Locate the "elastic" user and click "Edit"
4. Click "Reset password"
5. Follow the prompts to reset the password
The format for Elastic Cloud URLs is
https://username:password@cluster_id.region_id.gcp.cloud.es.io:9243.
Example:
.. code-block:: python
from langchain import ElasticVectorSearch
from langchain.embeddings import OpenAIEmbeddings
embedding = OpenAIEmbeddings()
elastic_host = "cluster_id.region_id.gcp.cloud.es.io"
elasticsearch_url = f"https://username:password@{elastic_host}:9243"
elastic_vector_search = ElasticVectorSearch(
elasticsearch_url=elasticsearch_url,
index_name="test_index",
embedding=embedding
)
Args:
elasticsearch_url (str): The URL for the Elasticsearch instance.
index_name (str): The name of the Elasticsearch index for the embeddings.
embedding (Embeddings): An object that provides the ability to embed text.
It should be an instance of a class that subclasses the Embeddings
abstract base class, such as OpenAIEmbeddings()
Raises:
ValueError: If the elasticsearch python package is not installed.
"""
def __init__(
self,
elasticsearch_url: str,
index_name: str,
embedding: Embeddings,
*,
ssl_verify: Optional[Dict[str, Any]] = None,
):
"""Initialize with necessary components."""
try:
import elasticsearch
except ImportError:
raise ImportError(
"Could not import elasticsearch python package. "
"Please install it with `pip install elasticsearch`."
)
self.embedding = embedding
self.index_name = index_name
_ssl_verify = ssl_verify or {}
try:
self.client = elasticsearch.Elasticsearch(elasticsearch_url, **_ssl_verify)
except ValueError as e:
raise ValueError(
f"Your elasticsearch client string is mis-formatted. Got error: {e} "
)
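Because `ssl_verify` is forwarded verbatim as keyword arguments to the `elasticsearch.Elasticsearch` constructor, any client option can be passed through it. A minimal sketch, assuming a local HTTPS cluster with a self-signed certificate (`verify_certs` is a standard elasticsearch-py client option):

from langchain import ElasticVectorSearch
from langchain.embeddings import OpenAIEmbeddings

store = ElasticVectorSearch(
    elasticsearch_url="https://localhost:9200",
    index_name="test_index",
    embedding=OpenAIEmbeddings(),
    # forwarded as **kwargs to elasticsearch.Elasticsearch
    ssl_verify={"verify_certs": False},
)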
[docs] def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
refresh_indices: bool = True,
ids: Optional[List[str]] = None,
**kwargs: Any,
) -> List[str]:
"""Run more texts through the embeddings and add to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
refresh_indices: Whether to refresh the Elasticsearch indices after adding
the texts. Defaults to True.
Returns:
List of ids from adding the texts into the vectorstore.
"""
try:
from elasticsearch.exceptions import NotFoundError
from elasticsearch.helpers import bulk
except ImportError:
raise ImportError(
"Could not import elasticsearch python package. "
"Please install it with `pip install elasticsearch`."
)
requests = []
ids = ids or [str(uuid.uuid4()) for _ in texts]
embeddings = self.embedding.embed_documents(list(texts))
dim = len(embeddings[0])
mapping = _default_text_mapping(dim)
# check to see if the index already exists
try:
self.client.indices.get(index=self.index_name)
except NotFoundError:
# TODO would be nice to create index before embedding,
# just to save expensive steps for last
self.create_index(self.client, self.index_name, mapping)
for i, text in enumerate(texts):
metadata = metadatas[i] if metadatas else {}
request = {
"_op_type": "index",
"_index": self.index_name,
"vector": embeddings[i],
"text": text,
"metadata": metadata,
"_id": ids[i],
}
requests.append(request)
bulk(self.client, requests)
if refresh_indices:
self.client.indices.refresh(index=self.index_name)
return ids
[docs] def similarity_search(
self, query: str, k: int = 4, filter: Optional[dict] = None, **kwargs: Any
) -> List[Document]:
"""Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter: Optional metadata filter applied to the search. Defaults to None.
Returns:
List of Documents most similar to the query.
"""
docs_and_scores = self.similarity_search_with_score(query, k, filter=filter)
documents = [d[0] for d in docs_and_scores]
return documents
[docs] def similarity_search_with_score(
self, query: str, k: int = 4, filter: Optional[dict] = None, **kwargs: Any
) -> List[Tuple[Document, float]]:
"""Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter: Optional metadata filter applied to the search. Defaults to None.
Returns:
List of Documents most similar to the query.
"""
embedding = self.embedding.embed_query(query)
script_query = _default_script_query(embedding, filter)
response = self.client_search(
self.client, self.index_name, script_query, size=k
)
hits = response["hits"]["hits"]
docs_and_scores = [
(
Document(
page_content=hit["_source"]["text"],
metadata=hit["_source"]["metadata"],
),
hit["_score"],
)
for hit in hits
]
return docs_and_scores
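`_default_script_query` is defined earlier in this module, outside this excerpt. For orientation, here is a sketch of the script-score query shape such a helper typically produces; the exact filter handling in the real helper may differ, and the single-key filter below is an illustrative assumption:

# Sketch only: approximates the query body passed to client_search above.
def _sketch_script_query(query_vector, filter=None):
    if filter:
        # assume a single {key: value} metadata filter for simplicity
        ((key, value),) = filter.items()
        base_query = {"bool": {"filter": {"term": {f"metadata.{key}": value}}}}
    else:
        base_query = {"match_all": {}}
    return {
        "script_score": {
            "query": base_query,
            "script": {
                # +1.0 keeps scores non-negative, as Elasticsearch requires
                "source": "cosineSimilarity(params.query_vector, 'vector') + 1.0",
                "params": {"query_vector": query_vector},
            },
        }
    }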
[docs] @classmethod
def from_texts(
cls,
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
elasticsearch_url: Optional[str] = None,
index_name: Optional[str] = None,
refresh_indices: bool = True,
**kwargs: Any,
) -> ElasticVectorSearch:
"""Construct ElasticVectorSearch wrapper from raw documents.
This is a user-friendly interface that:
1. Embeds documents.
2. Creates a new index for the embeddings in the Elasticsearch instance.
3. Adds the documents to the newly created Elasticsearch index.
This is intended to be a quick way to get started.
Example:
.. code-block:: python
from langchain import ElasticVectorSearch
from langchain.embeddings import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
elastic_vector_search = ElasticVectorSearch.from_texts(
texts,
embeddings,
elasticsearch_url="http://localhost:9200"
)
"""
elasticsearch_url = elasticsearch_url or get_from_env(
"elasticsearch_url", "ELASTICSEARCH_URL"
)
index_name = index_name or uuid.uuid4().hex
vectorsearch = cls(elasticsearch_url, index_name, embedding, **kwargs)
vectorsearch.add_texts(
texts, metadatas=metadatas, refresh_indices=refresh_indices
)
return vectorsearch
[docs] def create_index(self, client: Any, index_name: str, mapping: Dict) -> None:
# take the major version (e.g. 8 from "8.9.0"); splitting on "." avoids
# misreading multi-digit major versions
version_num = int(client.info()["version"]["number"].split(".")[0])
if version_num >= 8:
client.indices.create(index=index_name, mappings=mapping)
else:
client.indices.create(index=index_name, body={"mappings": mapping})
[docs] def client_search(
self, client: Any, index_name: str, script_query: Dict, size: int
) -> Any:
# take the major version, as in create_index above
version_num = int(client.info()["version"]["number"].split(".")[0])
if version_num >= 8:
response = client.search(index=index_name, query=script_query, size=size)
else:
response = client.search(
index=index_name, body={"query": script_query, "size": size}
)
return response
[docs] def delete(self, ids: List[str]) -> None:
"""Delete by vector IDs.
Args:
ids: List of ids to delete.
"""
# TODO: Check if this can be done in bulk
for id in ids:
self.client.delete(index=self.index_name, id=id)
[docs]class ElasticKnnSearch(ElasticVectorSearch):
"""
A class for performing k-Nearest Neighbors (k-NN) search on an Elasticsearch index.
The class is designed for a text search scenario where documents are text strings
and their embeddings are vector representations of those strings.
"""
def __init__(
self,
index_name: str,
embedding: Embeddings,
es_connection: Optional["Elasticsearch"] = None,
es_cloud_id: Optional[str] = None,
es_user: Optional[str] = None,
es_password: Optional[str] = None,
vector_query_field: Optional[str] = "vector",
query_field: Optional[str] = "text",
):
"""
Initializes an instance of the ElasticKnnSearch class and sets up the
Elasticsearch client.
Args:
index_name: The name of the Elasticsearch index.
embedding: An instance of the Embeddings class, used to generate vector
representations of text strings.
es_connection: An existing Elasticsearch connection.
es_cloud_id: The Cloud ID of the Elasticsearch instance. Required if
creating a new connection.
es_user: The username for the Elasticsearch instance. Required if
creating a new connection.
es_password: The password for the Elasticsearch instance. Required if
creating a new connection.
"""
try:
import elasticsearch
except ImportError:
raise ImportError(
"Could not import elasticsearch python package. "
"Please install it with `pip install elasticsearch`."
)
self.embedding = embedding
self.index_name = index_name
self.query_field = query_field
self.vector_query_field = vector_query_field
# If a pre-existing Elasticsearch connection is provided, use it.
if es_connection is not None:
self.client = es_connection
else:
# If credentials for a new Elasticsearch connection are provided,
# create a new connection.
if es_cloud_id and es_user and es_password:
self.client = elasticsearch.Elasticsearch(
cloud_id=es_cloud_id, basic_auth=(es_user, es_password)
)
else:
raise ValueError(
"""Either provide a pre-existing Elasticsearch connection, \
or valid credentials for creating a new connection."""
)
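For example, to connect with Elastic Cloud credentials (all values below are placeholders):

from langchain.embeddings import OpenAIEmbeddings

knn_store = ElasticKnnSearch(
    index_name="test_index",
    embedding=OpenAIEmbeddings(),
    es_cloud_id="<your-cloud-id>",
    es_user="elastic",
    es_password="<your-password>",
)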
@staticmethod
def _default_knn_mapping(dims: int) -> Dict:
"""Generates a default index mapping for kNN search."""
return {
"properties": {
"text": {"type": "text"},
"vector": {
"type": "dense_vector",
"dims": dims,
"index": True,
"similarity": "dot_product",
},
}
}
def _default_knn_query(
self,
query_vector: Optional[List[float]] = None,
query: Optional[str] = None,
model_id: Optional[str] = None,
k: Optional[int] = 10,
num_candidates: Optional[int] = 10,
) -> Dict:
knn: Dict = {
"field": self.vector_query_field,
"k": k,
"num_candidates": num_candidates,
}
# Case 1: `query_vector` is provided, but not `model_id` -> use query_vector
if query_vector and not model_id:
knn["query_vector"] = query_vector
# Case 2: `query` and `model_id` are provided, -> use query_vector_builder
elif query and model_id:
knn["query_vector_builder"] = {
"text_embedding": {
"model_id": model_id, # use 'model_id' argument
"model_text": query, # use 'query' argument
}
}
else:
raise ValueError(
"Either `query_vector` or `model_id` must be provided, but not both."
)
return knn
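The two branches above yield differently shaped knn clauses. Roughly:

# Case 1 (raw query vector):
# {"field": "vector", "k": 10, "num_candidates": 10,
#  "query_vector": [0.1, 0.2, ...]}
#
# Case 2 (server-side embedding with a deployed model):
# {"field": "vector", "k": 10, "num_candidates": 10,
#  "query_vector_builder": {"text_embedding": {
#      "model_id": "<model-id>", "model_text": "<query text>"}}}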
[docs] def knn_search(
self,
query: Optional[str] = None,
k: Optional[int] = 10,
query_vector: Optional[List[float]] = None,
model_id: Optional[str] = None,
size: Optional[int] = 10,
source: Optional[bool] = True,
fields: Optional[
Union[List[Mapping[str, Any]], Tuple[Mapping[str, Any], ...], None]
] = None,
) -> Dict:
"""
Performs a k-nearest neighbor (k-NN) search on the Elasticsearch index.
The search can be conducted using either a raw query vector or a model ID.
The method first generates
the body of the search query, which can be interpreted by Elasticsearch.
It then performs the k-NN
search on the Elasticsearch index and returns the results.
Args:
query: The query or queries to be used for the search. Required if
`query_vector` is not provided.
k: The number of nearest neighbors to return. Defaults to 10.
query_vector: The query vector to be used for the search. Required if
`query` is not provided.
model_id: The ID of the model to use for generating the query vector, if
`query` is provided.
size: The number of search hits to return. Defaults to 10.
source: Whether to include the source of each hit in the results.
fields: The fields to include in the source of each hit. If None, all
fields are included.
Note: the vector field searched is the instance's `vector_query_field`
(default "vector"), set at construction time.
Returns:
The search results.
Raises:
ValueError: If neither `query_vector` nor `model_id` is provided, or if
both are provided.
"""
knn_query_body = self._default_knn_query(
query_vector=query_vector, query=query, model_id=model_id, k=k
)
# Perform the kNN search on the Elasticsearch index and return the results.
res = self.client.search(
index=self.index_name,
knn=knn_query_body,
size=size,
source=source,
fields=fields,
)
return dict(res)
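A usage sketch, assuming an `ElasticKnnSearch` instance named `knn_store` as constructed above; the model id is a hypothetical example of a text-embedding model deployed in the cluster:

# Option A: embed the query client-side and search with the raw vector
query_vector = knn_store.embedding.embed_query("what is a vector store?")
res = knn_store.knn_search(query_vector=query_vector, k=5, size=5)

# Option B: let Elasticsearch embed the text with a deployed model
res = knn_store.knn_search(
    query="what is a vector store?",
    model_id="sentence-transformers__all-minilm-l6-v2",  # hypothetical model id
    k=5,
)
for hit in res["hits"]["hits"]:
    print(hit["_score"], hit["_source"]["text"])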
[docs] def knn_hybrid_search(
self,
query: Optional[str] = None,
k: Optional[int] = 10,
query_vector: Optional[List[float]] = None,
model_id: Optional[str] = None,
size: Optional[int] = 10,
source: Optional[bool] = True,
knn_boost: Optional[float] = 0.9,
query_boost: Optional[float] = 0.1,
fields: Optional[
Union[List[Mapping[str, Any]], Tuple[Mapping[str, Any], ...], None]
] = None,
) -> Dict[Any, Any]:
"""Performs a hybrid k-nearest neighbor (k-NN) and text-based search on the
Elasticsearch index.
The search can be conducted using either a raw query vector or a model ID.
The method first generates
the body of the k-NN search query and the text-based query, which can be
interpreted by Elasticsearch.
It then performs the hybrid search on the Elasticsearch index and returns the
results.
Args:
query: The query or queries to be used for the search. Required if
`query_vector` is not provided.
k: The number of nearest neighbors to return. Defaults to 10.
query_vector: The query vector to be used for the search. Required if
`query` is not provided.
model_id: The ID of the model to use for generating the query vector, if
`query` is provided.
size: The number of search hits to return. Defaults to 10.
source: Whether to include the source of each hit in the results.
knn_boost: The boost factor for the k-NN part of the search.
query_boost: The boost factor for the text-based part of the search.
fields: The fields to include in the source of each hit. If None, all
fields are included. Defaults to None.
Note: the search uses the instance's `vector_query_field` (default "vector")
and `query_field` (default "text"), both set at construction time.
Returns:
The search results.
Raises:
ValueError: If neither `query_vector` nor `model_id` is provided, or if
both are provided.
"""
knn_query_body = self._default_knn_query(
query_vector=query_vector, query=query, model_id=model_id, k=k
)
# Modify the knn_query_body to add a "boost" parameter
knn_query_body["boost"] = knn_boost
# Generate the body of the standard Elasticsearch query
match_query_body = {
"match": {self.query_field: {"query": query, "boost": query_boost}}
}
# Perform the hybrid search on the Elasticsearch index and return the results.
res = self.client.search(
index=self.index_name,
query=match_query_body,
knn=knn_query_body,
fields=fields,
size=size,
source=source,
)
return dict(res)
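A usage sketch under the same assumptions as the knn_search example. Choosing boosts that sum to roughly 1.0 keeps the vector and text score components on a comparable scale:

res = knn_store.knn_hybrid_search(
    query="what is a vector store?",
    model_id="sentence-transformers__all-minilm-l6-v2",  # hypothetical model id
    k=5,
    knn_boost=0.7,    # weight of the vector (kNN) component
    query_boost=0.3,  # weight of the BM25 text-match component
)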
Source code for langchain.vectorstores.mongodb_atlas
from __future__ import annotations
import logging
from typing import (
TYPE_CHECKING,
Any,
Dict,
Generator,
Iterable,
List,
Optional,
Tuple,
TypeVar,
Union,
)
from langchain.docstore.document import Document
from langchain.embeddings.base import Embeddings
from langchain.vectorstores.base import VectorStore
if TYPE_CHECKING:
from pymongo.collection import Collection
MongoDBDocumentType = TypeVar("MongoDBDocumentType", bound=Dict[str, Any])
logger = logging.getLogger(__name__)
DEFAULT_INSERT_BATCH_SIZE = 100
[docs]class MongoDBAtlasVectorSearch(VectorStore):
"""Wrapper around MongoDB Atlas Vector Search.
To use, you should have both:
- the ``pymongo`` python package installed
- a connection string associated with a MongoDB Atlas Cluster having deployed an
Atlas Search index
Example:
.. code-block:: python
from langchain.vectorstores import MongoDBAtlasVectorSearch
from langchain.embeddings.openai import OpenAIEmbeddings
from pymongo import MongoClient
mongo_client = MongoClient("<YOUR-CONNECTION-STRING>")
collection = mongo_client["<db_name>"]["<collection_name>"]
embeddings = OpenAIEmbeddings()
vectorstore = MongoDBAtlasVectorSearch(collection, embeddings)
"""
def __init__(
self,
collection: Collection[MongoDBDocumentType],
embedding: Embeddings,
*,
index_name: str = "default",
text_key: str = "text",
embedding_key: str = "embedding",
):
"""
Args:
collection: MongoDB collection to add the texts to.
embedding: Text embedding model to use.
text_key: MongoDB field that will contain the text for each
document.
embedding_key: MongoDB field that will contain the embedding for
each document.
"""
self._collection = collection
self._embedding = embedding
self._index_name = index_name
self._text_key = text_key
self._embedding_key = embedding_key
[docs] @classmethod
def from_connection_string(
cls,
connection_string: str,
namespace: str,
embedding: Embeddings,
**kwargs: Any,
) -> MongoDBAtlasVectorSearch:
try:
from pymongo import MongoClient
except ImportError:
raise ImportError(
"Could not import pymongo, please install it with "
"`pip install pymongo`."
)
client: MongoClient = MongoClient(connection_string)
db_name, collection_name = namespace.split(".")
collection = client[db_name][collection_name]
return cls(collection, embedding, **kwargs)
[docs] def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[Dict[str, Any]]] = None,
**kwargs: Any,
) -> List:
"""Run more texts through the embeddings and add to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
Returns:
List of ids from adding the texts into the vectorstore.
"""
batch_size = kwargs.get("batch_size", DEFAULT_INSERT_BATCH_SIZE)
_metadatas: Union[List, Generator] = metadatas or ({} for _ in texts)
texts_batch = []
metadatas_batch = []
result_ids = []
for i, (text, metadata) in enumerate(zip(texts, _metadatas)):
texts_batch.append(text)
metadatas_batch.append(metadata)
if (i + 1) % batch_size == 0:
result_ids.extend(self._insert_texts(texts_batch, metadatas_batch))
texts_batch = []
metadatas_batch = []
if texts_batch:
result_ids.extend(self._insert_texts(texts_batch, metadatas_batch))
return result_ids
def _insert_texts(self, texts: List[str], metadatas: List[Dict[str, Any]]) -> List:
if not texts:
return []
# Embed and create the documents
embeddings = self._embedding.embed_documents(texts)
to_insert = [
{self._text_key: t, self._embedding_key: embedding, **m}
for t, m, embedding in zip(texts, metadatas, embeddings)
]
# insert the documents in MongoDB Atlas
insert_result = self._collection.insert_many(to_insert)
return insert_result.inserted_ids
[docs] def similarity_search_with_score(
self,
query: str,
*,
k: int = 4,
pre_filter: Optional[dict] = None,
post_filter_pipeline: Optional[List[Dict]] = None,
) -> List[Tuple[Document, float]]:
"""Return MongoDB documents most similar to query, along with scores.
Use the knnBeta Operator available in MongoDB Atlas Search
This feature is in early access and available only for evaluation purposes, to
validate functionality, and to gather feedback from a small closed group of
early access users. It is not recommended for production deployments as we
may introduce breaking changes.
For more: https://www.mongodb.com/docs/atlas/atlas-search/knn-beta
Args:
query: Text to look up documents similar to.
k: Optional Number of Documents to return. Defaults to 4.
pre_filter: Optional Dictionary of argument(s) to prefilter on document
fields.
post_filter_pipeline: Optional Pipeline of MongoDB aggregation stages
following the knnBeta search.
Returns:
List of Documents most similar to the query and score for each
"""
knn_beta = {
"vector": self._embedding.embed_query(query),
"path": self._embedding_key,
"k": k,
}
if pre_filter:
knn_beta["filter"] = pre_filter
pipeline = [
{
"$search": {
"index": self._index_name,
"knnBeta": knn_beta,
}
},
{"$project": {"score": {"$meta": "searchScore"}, self._embedding_key: 0}},
]
if post_filter_pipeline is not None:
pipeline.extend(post_filter_pipeline)
cursor = self._collection.aggregate(pipeline)
docs = []
for res in cursor:
text = res.pop(self._text_key)
score = res.pop("score")
docs.append((Document(page_content=text, metadata=res), score))
return docs
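For orientation, the aggregation pipeline assembled above has roughly this shape (field names reflect the defaults, `embedding` and index `default`):

# [
#   {"$search": {"index": "default",
#                "knnBeta": {"vector": [...], "path": "embedding", "k": 4,
#                            "filter": {...}}}},  # "filter" only if pre_filter given
#   {"$project": {"score": {"$meta": "searchScore"}, "embedding": 0}},
#   # ...followed by any post_filter_pipeline stages
# ]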
[docs] def similarity_search(
self,
query: str,
k: int = 4,
pre_filter: Optional[dict] = None,
post_filter_pipeline: Optional[List[Dict]] = None,
**kwargs: Any,
) -> List[Document]:
"""Return MongoDB documents most similar to query.
Use the knnBeta Operator available in MongoDB Atlas Search
This feature is in early access and available only for evaluation purposes, to
validate functionality, and to gather feedback from a small closed group of
early access users. It is not recommended for production deployments as we may
introduce breaking changes.
For more: https://www.mongodb.com/docs/atlas/atlas-search/knn-beta
Args:
query: Text to look up documents similar to.
k: Optional Number of Documents to return. Defaults to 4.
pre_filter: Optional Dictionary of argument(s) to prefilter on document
fields.
post_filter_pipeline: Optional Pipeline of MongoDB aggregation stages
following the knnBeta search.
Returns:
List of Documents most similar to the query and score for each
"""
docs_and_scores = self.similarity_search_with_score(
query,
k=k,
pre_filter=pre_filter,
post_filter_pipeline=post_filter_pipeline,
)
return [doc for doc, _ in docs_and_scores]
[docs] @classmethod
def from_texts(
cls,
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
collection: Optional[Collection[MongoDBDocumentType]] = None,
**kwargs: Any,
) -> MongoDBAtlasVectorSearch:
"""Construct MongoDBAtlasVectorSearch wrapper from raw documents.
This is a user-friendly interface that:
1. Embeds documents.
2. Adds the documents to a provided MongoDB Atlas Vector Search index
(Lucene)
This is intended to be a quick way to get started.
Example:
.. code-block:: python
from pymongo import MongoClient
from langchain.vectorstores import MongoDBAtlasVectorSearch
from langchain.embeddings import OpenAIEmbeddings
client = MongoClient("<YOUR-CONNECTION-STRING>")
collection = mongo_client["<db_name>"]["<collection_name>"]
embeddings = OpenAIEmbeddings()
vectorstore = MongoDBAtlasVectorSearch.from_texts(
texts,
embeddings,
metadatas=metadatas,
collection=collection
)
"""
if collection is None:
raise ValueError("Must provide 'collection' named parameter.")
vecstore = cls(collection, embedding, **kwargs)
vecstore.add_texts(texts, metadatas=metadatas)
return vecstore
Source code for langchain.vectorstores.tair
"""Wrapper around Tair Vector."""
from __future__ import annotations
import json
import logging
import uuid
from typing import Any, Iterable, List, Optional, Type
from langchain.docstore.document import Document
from langchain.embeddings.base import Embeddings
from langchain.utils import get_from_dict_or_env
from langchain.vectorstores.base import VectorStore
logger = logging.getLogger(__name__)
def _uuid_key() -> str:
return uuid.uuid4().hex
[docs]class Tair(VectorStore):
"""Wrapper around Tair Vector store."""
def __init__(
self,
embedding_function: Embeddings,
url: str,
index_name: str,
content_key: str = "content",
metadata_key: str = "metadata",
search_params: Optional[dict] = None,
**kwargs: Any,
):
self.embedding_function = embedding_function
self.index_name = index_name
try:
from tair import Tair as TairClient
except ImportError:
raise ImportError(
"Could not import tair python package. "
"Please install it with `pip install tair`."
)
try:
# connect to tair from url
client = TairClient.from_url(url, **kwargs)
except ValueError as e:
raise ValueError(f"Tair failed to connect: {e}")
self.client = client
self.content_key = content_key
self.metadata_key = metadata_key
self.search_params = search_params
[docs] def create_index_if_not_exist(
self,
dim: int,
distance_type: str,
index_type: str,
data_type: str,
**kwargs: Any,
) -> bool:
index = self.client.tvs_get_index(self.index_name)
if index is not None:
logger.info("Index already exists")
return False
self.client.tvs_create_index(
self.index_name,
dim,
distance_type,
index_type,
data_type,
**kwargs,
)
return True
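A minimal sketch of creating an index up front, assuming an existing `Tair` instance named `store` and an embedding model producing 1536-dimensional vectors (the constants come from the `tairvector` module, as used in `from_texts` below):

from tair import tairvector

store.create_index_if_not_exist(
    dim=1536,  # must match the embedding model's output dimension
    distance_type=tairvector.DistanceMetric.InnerProduct,
    index_type=tairvector.IndexType.HNSW,
    data_type=tairvector.DataType.Float32,
)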
[docs] def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
**kwargs: Any,
) -> List[str]:
"""Add texts data to an existing index."""
ids = []
keys = kwargs.get("keys", None)
# Write data to tair
pipeline = self.client.pipeline(transaction=False)
embeddings = self.embedding_function.embed_documents(list(texts))
for i, text in enumerate(texts):
# Use provided key otherwise use default key
key = keys[i] if keys else _uuid_key()
metadata = metadatas[i] if metadatas else {}
pipeline.tvs_hset(
self.index_name,
key,
embeddings[i],
False,
**{
self.content_key: text,
self.metadata_key: json.dumps(metadata),
},
)
ids.append(key)
pipeline.execute()
return ids
[docs] def similarity_search(
self, query: str, k: int = 4, **kwargs: Any
) -> List[Document]:
"""
Returns the most similar indexed documents to the query text.
Args:
query (str): The query text for which to find similar documents.
k (int): The number of documents to return. Default is 4.
Returns:
List[Document]: A list of documents that are most similar to the query text.
"""
# Creates embedding vector from user query
embedding = self.embedding_function.embed_query(query)
keys_and_scores = self.client.tvs_knnsearch(
self.index_name, k, embedding, False, None, **kwargs
)
pipeline = self.client.pipeline(transaction=False)
for key, _ in keys_and_scores:
pipeline.tvs_hmget(
self.index_name, key, self.metadata_key, self.content_key
)
docs = pipeline.execute()
return [
Document(
page_content=d[1],
metadata=json.loads(d[0]),
)
for d in docs
]
[docs] @classmethod
def from_texts(
cls: Type[Tair],
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
index_name: str = "langchain",
content_key: str = "content",
metadata_key: str = "metadata",
**kwargs: Any,
) -> Tair:
try:
from tair import tairvector
except ImportError:
raise ImportError(
"Could not import tair python package. "
"Please install it with `pip install tair`."
)
url = get_from_dict_or_env(kwargs, "tair_url", "TAIR_URL")
if "tair_url" in kwargs:
kwargs.pop("tair_url")
distance_type = tairvector.DistanceMetric.InnerProduct
if "distance_type" in kwargs:
distance_type = kwargs.pop("distance_type")
index_type = tairvector.IndexType.HNSW
if "index_type" in kwargs:
index_type = kwargs.pop("index_type")
data_type = tairvector.DataType.Float32
if "data_type" in kwargs:
data_type = kwargs.pop("data_type")
index_params = {}
if "index_params" in kwargs:
index_params = kwargs.pop("index_params")
search_params = {}
if "search_params" in kwargs:
search_params = kwargs.pop("search_params")
keys = None
if "keys" in kwargs:
keys = kwargs.pop("keys")
try:
tair_vector_store = cls(
embedding,
url,
index_name,
content_key=content_key,
metadata_key=metadata_key,
search_params=search_params,
**kwargs,
)
except ValueError as e:
raise ValueError(f"tair failed to connect: {e}")
# Create embeddings for documents
embeddings = embedding.embed_documents(texts)
tair_vector_store.create_index_if_not_exist(
len(embeddings[0]),
distance_type,
index_type,
data_type,
**index_params,
)
tair_vector_store.add_texts(texts, metadatas, keys=keys)
return tair_vector_store
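A usage sketch; the connection URL can be supplied either via the `tair_url` keyword (a placeholder value below) or the `TAIR_URL` environment variable:

from langchain.embeddings import OpenAIEmbeddings

store = Tair.from_texts(
    texts=["hello", "world"],
    embedding=OpenAIEmbeddings(),
    index_name="langchain",
    tair_url="redis://localhost:6379",  # placeholder; or set TAIR_URL
)
docs = store.similarity_search("greeting", k=1)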
[docs] @classmethod
def from_documents(
cls,
documents: List[Document],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
index_name: str = "langchain",
content_key: str = "content",
metadata_key: str = "metadata",
**kwargs: Any,
) -> Tair:
texts = [d.page_content for d in documents]
metadatas = [d.metadata for d in documents]
return cls.from_texts(
texts, embedding, metadatas, index_name, content_key, metadata_key, **kwargs
)
[docs] @staticmethod
def drop_index(
index_name: str = "langchain",
**kwargs: Any,
) -> bool:
"""
Drop an existing index.
Args:
index_name (str): Name of the index to drop.
Returns:
bool: True if the index is dropped successfully.
"""
try:
from tair import Tair as TairClient
except ImportError:
raise ImportError(
"Could not import tair python package. "
"Please install it with `pip install tair`."
)
url = get_from_dict_or_env(kwargs, "tair_url", "TAIR_URL")
try:
if "tair_url" in kwargs:
kwargs.pop("tair_url")
client = TairClient.from_url(url=url, **kwargs)
except ValueError as e:
raise ValueError(f"Tair connection error: {e}")
# delete index
ret = client.tvs_del_index(index_name)
if ret == 0:
# index not exist
logger.info("Index does not exist")
return False
return True
[docs] @classmethod
def from_existing_index(
cls,
embedding: Embeddings,
index_name: str = "langchain",
content_key: str = "content",
metadata_key: str = "metadata",
**kwargs: Any,
) -> Tair:
"""Connect to an existing Tair index."""
url = get_from_dict_or_env(kwargs, "tair_url", "TAIR_URL")
search_params = {}
if "search_params" in kwargs:
search_params = kwargs.pop("search_params")
return cls(
embedding,
url,
index_name,
content_key=content_key,
metadata_key=metadata_key,
search_params=search_params,
**kwargs,
)
Source code for langchain.vectorstores.deeplake
"""Wrapper around Activeloop Deep Lake."""
from __future__ import annotations
import logging
from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
try:
import deeplake
from deeplake.core.fast_forwarding import version_compare
from deeplake.core.vectorstore import DeepLakeVectorStore
_DEEPLAKE_INSTALLED = True
except ImportError:
_DEEPLAKE_INSTALLED = False
from langchain.docstore.document import Document
from langchain.embeddings.base import Embeddings
from langchain.vectorstores.base import VectorStore
from langchain.vectorstores.utils import maximal_marginal_relevance
logger = logging.getLogger(__name__)
[docs]class DeepLake(VectorStore):
"""Wrapper around Deep Lake, a data lake for deep learning applications.
We integrated Deep Lake's similarity search and filtering for fast prototyping.
It now also supports Tensor Query Language (TQL) for production use cases
over billions of rows.
Why Deep Lake?
- Not only stores embeddings, but also the original data with version control.
- Serverless, doesn't require another service and can be used with major
cloud providers (S3, GCS, etc.)
- More than just a multi-modal vector store. You can use the dataset
to fine-tune your own LLM models.
To use, you should have the ``deeplake`` python package installed.
Example:
.. code-block:: python
from langchain.vectorstores import DeepLake
from langchain.embeddings.openai import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
vectorstore = DeepLake("langchain_store", embeddings.embed_query)
"""
_LANGCHAIN_DEFAULT_DEEPLAKE_PATH = "./deeplake/"
def __init__(
self,
dataset_path: str = _LANGCHAIN_DEFAULT_DEEPLAKE_PATH,
token: Optional[str] = None,
embedding_function: Optional[Embeddings] = None,
read_only: bool = False,
ingestion_batch_size: int = 1000,
num_workers: int = 0,
verbose: bool = True,
exec_option: str = "python",
**kwargs: Any,
) -> None:
"""Creates an empty DeepLakeVectorStore or loads an existing one.
The DeepLakeVectorStore is located at the specified ``dataset_path``.
Examples:
>>> # Create a vector store with default tensors
>>> deeplake_vectorstore = DeepLake(
... path = <path_for_storing_Data>,
... )
>>>
>>> # Create a vector store in the Deep Lake Managed Tensor Database
>>> data = DeepLake(
... path = "hub://org_id/dataset_name",
... exec_option = "tensor_db",
... )
Args:
dataset_path (str): Path to existing dataset or where to create
a new one. Defaults to _LANGCHAIN_DEFAULT_DEEPLAKE_PATH.
token (str, optional): Activeloop token, for fetching credentials
to the dataset at path if it is a Deep Lake dataset.
Tokens are normally autogenerated. Optional.
embedding_function (Embeddings, optional): Function used to embed
documents or queries. Optional.
read_only (bool): Open dataset in read-only mode. Default is False.
ingestion_batch_size (int): During data ingestion, data is divided
into batches. Batch size is the size of each batch.
Default is 1000.
num_workers (int): Number of workers to use during data ingestion.
Default is 0.
verbose (bool): Print dataset summary after each operation.
Default is True.
exec_option (str): DeepLakeVectorStore supports 3 ways to perform
searching - "python", "compute_engine", "tensor_db".
Default is "python".
- ``python`` - Pure-python implementation that runs on the client.
WARNING: using this with big datasets can lead to memory
issues. Data can be stored anywhere.
- ``compute_engine`` - C++ implementation of the Deep Lake Compute
Engine that runs on the client. Can be used for any data stored in
or connected to Deep Lake. Not for in-memory or local datasets.
- ``tensor_db`` - Hosted Managed Tensor Database that is
responsible for storage and query execution. Only for data stored in
the Deep Lake Managed Database. Use runtime = {"db_engine": True} during
dataset creation.
**kwargs: Other optional keyword arguments.
Raises:
ValueError: If some condition is not met.
"""
self.ingestion_batch_size = ingestion_batch_size
self.num_workers = num_workers
self.verbose = verbose
if _DEEPLAKE_INSTALLED is False:
raise ValueError(
"Could not import deeplake python package. "
"Please install it with `pip install deeplake`."
)
if version_compare(deeplake.__version__, "3.6.2") == -1:
raise ValueError(
"deeplake version should be >= 3.6.3, but you've installed"
f" {deeplake.__version__}. Consider upgrading deeplake version \
pip install --upgrade deeplake."
)
self.dataset_path = dataset_path
self.vectorstore = DeepLakeVectorStore(
path=self.dataset_path,
embedding_function=embedding_function,
read_only=read_only,
token=token,
exec_option=exec_option,
verbose=verbose,
**kwargs,
)
self._embedding_function = embedding_function
self._id_tensor_name = "ids" if "ids" in self.vectorstore.tensors() else "id"
[docs] def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
**kwargs: Any,
) -> List[str]:
"""Run more texts through the embeddings and add to the vectorstore.
Examples:
>>> ids = deeplake_vectorstore.add_texts(
... texts = <list_of_texts>,
... metadatas = <list_of_metadata_jsons>,
... ids = <list_of_ids>,
... )
Args:
texts (Iterable[str]): Texts to add to the vectorstore.
metadatas (Optional[List[dict]], optional): Optional list of metadatas.
ids (Optional[List[str]], optional): Optional list of IDs.
**kwargs: other optional keyword arguments.
Returns:
List[str]: List of IDs of the added texts.
"""
# capture any caller-supplied embedding function before kwargs is rebuilt,
# so it is not lost when kwargs is reset below
embedding_function = kwargs.get("embedding_function")
kwargs = {}
if ids:
if self._id_tensor_name == "ids": # for backwards compatibility
kwargs["ids"] = ids
else:
kwargs["id"] = ids
if metadatas is None:
metadatas = [{}] * len(list(texts))
return self.vectorstore.add(
text=texts,
metadata=metadatas,
embedding_data=texts,
embedding_tensor="embedding",
embedding_function=embedding_function
or self._embedding_function.embed_documents, # type: ignore
return_ids=True,
**kwargs,
)
def _search_tql(
self,
tql_query: Optional[str],
exec_option: Optional[str] = None,
return_score: bool = False,
) -> Union[List[Document], List[Tuple[Document, float]]]:
"""Function for performing tql_search.
Args:
tql_query (str): TQL Query string for direct evaluation.
Available only for `compute_engine` and `tensor_db`.
exec_option (str, optional): Supports 3 ways to search.
Could be "python", "compute_engine" or "tensor_db". Default is "python".
- ``python`` - Pure-python implementation for the client.
WARNING: not recommended for big datasets due to potential memory
issues.
- ``compute_engine`` - C++ implementation of Deep Lake Compute
Engine for the client. Not for in-memory or local datasets.
- ``tensor_db`` - Hosted Managed Tensor Database for storage
and query execution. Only for data in Deep Lake Managed Database.
Use runtime = {"db_engine": True} during dataset creation.
return_score (bool): Return score with document. Default is False.
Returns:
List[Document] - A list of documents
Raises:
ValueError: If return_score is True but some condition is not met.
"""
result = self.vectorstore.search(
query=tql_query,
exec_option=exec_option,
)
metadatas = result["metadata"]
texts = result["text"]
docs = [
Document(
page_content=text,
metadata=metadata,
)
for text, metadata in zip(texts, metadatas)
]
if return_score:
raise ValueError("scores can't be returned with tql search")
return docs
def _search(
self,
query: Optional[str] = None,
embedding: Optional[Union[List[float], np.ndarray]] = None,
embedding_function: Optional[Callable] = None,
k: int = 4,
distance_metric: str = "L2",
use_maximal_marginal_relevance: bool = False,
fetch_k: Optional[int] = 20,
filter: Optional[Union[Dict, Callable]] = None,
return_score: bool = False,
exec_option: Optional[str] = None,
**kwargs: Any,
) -> Union[List[Document], List[Tuple[Document, float]]]:
"""
Return docs similar to query.
Args:
query (str, optional): Text to look up similar docs.
embedding (Union[List[float], np.ndarray], optional): Query's embedding.
embedding_function (Callable, optional): Function to convert `query`
into embedding.
k (int): Number of Documents to return.
distance_metric (str): `L2` for Euclidean, `L1` for Nuclear, `max`
for L-infinity distance, `cos` for cosine similarity, 'dot' for dot
product.
filter (Union[Dict, Callable], optional): Additional filter prior
to the embedding search.
- ``Dict`` - Key-value search on tensors of htype json, on an
AND basis (a sample must satisfy all key-value filters to be True)
Dict = {"tensor_name_1": {"key": value},
"tensor_name_2": {"key": value}}
- ``Function`` - Any function compatible with `deeplake.filter`.
use_maximal_marginal_relevance (bool): Use maximal marginal relevance.
fetch_k (int): Number of Documents for MMR algorithm.
return_score (bool): Return the score.
exec_option (str, optional): Supports 3 ways to perform searching.
Could be "python", "compute_engine" or "tensor_db".
- ``python`` - Pure-python implementation for the client.
WARNING: not recommended for big datasets.
- ``compute_engine`` - C++ implementation of Deep Lake Compute
Engine for the client. Not for in-memory or local datasets.
- ``tensor_db`` - Hosted Managed Tensor Database for storage
and query execution. Only for data in Deep Lake Managed Database.
Use runtime = {"db_engine": True} during dataset creation.
**kwargs: Additional keyword arguments.
Returns:
List of Documents by the specified distance metric,
if return_score True, return a tuple of (Document, score)
Raises:
ValueError: if both `embedding` and `embedding_function` are not specified.
"""
if kwargs.get("tql_query"):
return self._search_tql(
tql_query=kwargs["tql_query"],
exec_option=exec_option,
return_score=return_score,
)
if embedding_function:
if isinstance(embedding_function, Embeddings):
_embedding_function = embedding_function.embed_query
else:
_embedding_function = embedding_function
elif self._embedding_function:
_embedding_function = self._embedding_function.embed_query
else:
_embedding_function = None
if embedding is None:
if _embedding_function is None:
raise ValueError(
"Either `embedding` or `embedding_function` needs to be"
" specified."
)
embedding = _embedding_function(query) if query else None
if isinstance(embedding, list):
embedding = np.array(embedding, dtype=np.float32)
if len(embedding.shape) > 1:
embedding = embedding[0]
result = self.vectorstore.search(
embedding=embedding,
k=fetch_k if use_maximal_marginal_relevance else k,
distance_metric=distance_metric,
filter=filter,
exec_option=exec_option,
return_tensors=["embedding", "metadata", "text"],
)
scores = result["score"]
embeddings = result["embedding"]
metadatas = result["metadata"]
texts = result["text"]
if use_maximal_marginal_relevance:
lambda_mult = kwargs.get("lambda_mult", 0.5)
indices = maximal_marginal_relevance( # type: ignore
embedding, # type: ignore
embeddings,
k=min(k, len(texts)),
lambda_mult=lambda_mult,
)
scores = [scores[i] for i in indices]
texts = [texts[i] for i in indices]
metadatas = [metadatas[i] for i in indices]
docs = [
Document(
page_content=text,
metadata=metadata,
)
for text, metadata in zip(texts, metadatas)
]
if return_score:
return [(doc, score) for doc, score in zip(docs, scores)]
return docs
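`maximal_marginal_relevance` is imported from `langchain.vectorstores.utils`. As a minimal sketch of the selection idea it implements (not the library's exact code): greedy re-ranking that trades query similarity against redundancy, here using dot products as a stand-in for the library's similarity measure:

import numpy as np

def mmr_sketch(query_vec, doc_vecs, k=4, lambda_mult=0.5):
    """Greedily pick k docs balancing query similarity and diversity."""
    doc_vecs = np.asarray(doc_vecs, dtype=np.float32)
    sim_to_query = doc_vecs @ np.asarray(query_vec, dtype=np.float32)
    selected = []
    while len(selected) < min(k, len(doc_vecs)):
        best, best_score = None, -np.inf
        for i in range(len(doc_vecs)):
            if i in selected:
                continue
            # redundancy = similarity to the closest already-selected doc
            redundancy = max(
                (float(doc_vecs[i] @ doc_vecs[j]) for j in selected), default=0.0
            )
            score = lambda_mult * sim_to_query[i] - (1 - lambda_mult) * redundancy
            if score > best_score:
                best, best_score = i, score
        selected.append(best)
    return selected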
[docs] def similarity_search(
self,
query: str,
k: int = 4,
**kwargs: Any,
) -> List[Document]:
"""
Return docs most similar to query.
Examples:
>>> # Search using an embedding
>>> data = vector_store.similarity_search(
... query=<your_query>,
... k=<num_items>,
... exec_option=<preferred_exec_option>,
... )
>>> # Run a TQL query (forwarded through kwargs to the internal search;
>>> # requires the "compute_engine" or "tensor_db" exec option):
>>> data = vector_store.similarity_search(
...     query=None,
...     tql_query="SELECT * WHERE id == <id>",
...     exec_option="compute_engine",
... )
Args:
k (int): Number of Documents to return. Defaults to 4.
query (str): Text to look up similar documents.
**kwargs: Additional keyword arguments include:
embedding (Callable): Embedding function to use. Defaults to None.
distance_metric (str): 'L2' for Euclidean, 'L1' for Nuclear, 'max'
for L-infinity, 'cos' for cosine, 'dot' for dot product.
Defaults to 'L2'.
filter (Union[Dict, Callable], optional): Additional filter
before embedding search.
- Dict: Key-value search on tensors of htype json,
(sample must satisfy all key-value filters)
Dict = {"tensor_1": {"key": value}, "tensor_2": {"key": value}}
- Function: Compatible with `deeplake.filter`.
Defaults to None.
exec_option (str): Supports 3 ways to perform searching.
'python', 'compute_engine', or 'tensor_db'. Defaults to 'python'.
- 'python': Pure-python implementation for the client.
WARNING: not recommended for big datasets.
- 'compute_engine': C++ implementation of the Compute Engine for
the client. Not for in-memory or local datasets.
- 'tensor_db': Managed Tensor Database for storage and query.
Only for data in Deep Lake Managed Database.
Use `runtime = {"db_engine": True}` during dataset creation.
Returns:
List[Document]: List of Documents most similar to the query vector.
"""
return self._search(
query=query,
k=k,
use_maximal_marginal_relevance=False,
return_score=False,
**kwargs,
)
[docs] def similarity_search_by_vector(
self,
embedding: Union[List[float], np.ndarray],
k: int = 4,
**kwargs: Any,
) -> List[Document]:
"""
Return docs most similar to embedding vector.
Examples:
>>> # Search using an embedding
>>> data = vector_store.similarity_search_by_vector(
... embedding=<your_embedding>,
... k=<num_items_to_return>,
... exec_option=<preferred_exec_option>,
... )
Args:
embedding (Union[List[float], np.ndarray]):
Embedding to find similar docs.
k (int): Number of Documents to return. Defaults to 4.
**kwargs: Additional keyword arguments including:
filter (Union[Dict, Callable], optional):
Additional filter before embedding search.
- ``Dict`` - Key-value search on tensors of htype json. True
if all key-value filters are satisfied.
Dict = {"tensor_name_1": {"key": value},
"tensor_name_2": {"key": value}}
- ``Function`` - Any function compatible with
`deeplake.filter`.
Defaults to None.
exec_option (str): Options for search execution include
"python", "compute_engine", or "tensor_db". Defaults to
"python".
- "python" - Pure-python implementation running on the client.
Can be used for data stored anywhere. WARNING: using this
option with big datasets is discouraged due to potential
memory issues.
- "compute_engine" - Performant C++ implementation of the Deep
Lake Compute Engine. Runs on the client and can be used for
any data stored in or connected to Deep Lake. It cannot be
used with in-memory or local datasets.
- "tensor_db" - Performant, fully-hosted Managed Tensor Database.
Responsible for storage and query execution. Only available
for data stored in the Deep Lake Managed Database.
To store datasets in this database, specify
`runtime = {"db_engine": True}` during dataset creation.
distance_metric (str): `L2` for Euclidean, `L1` for Nuclear,
`max` for L-infinity distance, `cos` for cosine similarity,
'dot' for dot product. Defaults to `L2`.
Returns:
List[Document]: List of Documents most similar to the query vector.
"""
return self._search(
embedding=embedding,
k=k,
use_maximal_marginal_relevance=False,
return_score=False,
**kwargs,
)
[docs] def similarity_search_with_score(
self,
query: str,
k: int = 4,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""
Run similarity search with Deep Lake with distance returned.
Examples:
>>> data = vector_store.similarity_search_with_score(
... query=<your_query>,
... embedding=<your_embedding_function>
... k=<number_of_items_to_return>,
... exec_option=<preferred_exec_option>,
... )
Args:
query (str): Query text to search for.
k (int): Number of results to return. Defaults to 4.
**kwargs: Additional keyword arguments. Some of these arguments are:
distance_metric: `L2` for Euclidean, `L1` for Nuclear, `max` for L-infinity
distance, `cos` for cosine similarity, 'dot' for dot product.
Defaults to `L2`.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
embedding_function (Callable): Embedding function to use. Defaults
to None.
exec_option (str): DeepLakeVectorStore supports 3 ways to perform
searching. It could be either "python", "compute_engine" or
"tensor_db". Defaults to "python".
- "python" - Pure-python implementation running on the client.
Can be used for data stored anywhere. WARNING: using this
option with big datasets is discouraged due to potential
memory issues.
- "compute_engine" - Performant C++ implementation of the Deep
Lake Compute Engine. Runs on the client and can be used for
any data stored in or connected to Deep Lake. It cannot be used
with in-memory or local datasets.
- "tensor_db" - Performant, fully-hosted Managed Tensor Database.
Responsible for storage and query execution. Only available for
data stored in the Deep Lake Managed Database. To store datasets
in this database, specify `runtime = {"db_engine": True}`
during dataset creation.
Returns:
List[Tuple[Document, float]]: List of documents most similar to the query
text with distance in float."""
return self._search(
query=query,
k=k,
return_score=True,
**kwargs,
)
[docs] def max_marginal_relevance_search_by_vector(
self,
embedding: List[float],
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
exec_option: Optional[str] = None,
**kwargs: Any,
) -> List[Document]:
"""
Return docs selected using the maximal marginal relevance. Maximal marginal
relevance optimizes for similarity to query AND diversity among selected docs.
Examples:
>>> data = vector_store.max_marginal_relevance_search_by_vector(
... embedding=<your_embedding>,
... fetch_k=<elements_to_fetch_before_mmr_search>,
... k=<number_of_items_to_return>,
... exec_option=<preferred_exec_option>,
... )
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch for MMR algorithm.
lambda_mult: Number between 0 and 1 determining the degree of diversity.
0 corresponds to max diversity and 1 to min diversity. Defaults to 0.5.
exec_option (str): DeepLakeVectorStore supports 3 ways for searching.
Could be "python", "compute_engine" or "tensor_db". Defaults to
"python".
- "python" - Pure-python implementation running on the client.
Can be used for data stored anywhere. WARNING: using this
option with big datasets is discouraged due to potential
memory issues.
- "compute_engine" - Performant C++ implementation of the Deep
Lake Compute Engine. Runs on the client and can be used for
any data stored in or connected to Deep Lake. It cannot be used
with in-memory or local datasets.
- "tensor_db" - Performant, fully-hosted Managed Tensor Database.
Responsible for storage and query execution. Only available for
data stored in the Deep Lake Managed Database. To store datasets
in this database, specify `runtime = {"db_engine": True}`
during dataset creation.
**kwargs: Additional keyword arguments.
Returns:
List[Document] - A list of documents.
"""
return self._search(
embedding=embedding,
k=k,
fetch_k=fetch_k,
use_maximal_marginal_relevance=True,
lambda_mult=lambda_mult,
exec_option=exec_option,
**kwargs,
)
[docs] def max_marginal_relevance_search(
self,
query: str,
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
exec_option: Optional[str] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs selected using maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Examples:
>>> # Search using an embedding
>>> data = vector_store.max_marginal_relevance_search(
... query = <query_to_search>,
... embedding_function = <embedding_function_for_query>,
... k = <number_of_items_to_return>,
... exec_option = <preferred_exec_option>,
... )
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents for MMR algorithm.
lambda_mult: Value between 0 and 1. 0 corresponds
to maximum diversity and 1 to minimum.
Defaults to 0.5.
exec_option (str): Supports 3 ways to perform searching.
- "python" - Pure-python implementation running on the client.
Can be used for data stored anywhere. WARNING: using this
option with big datasets is discouraged due to potential
memory issues.
- "compute_engine" - Performant C++ implementation of the Deep
Lake Compute Engine. Runs on the client and can be used for
any data stored in or connected to Deep Lake. It cannot be
used with in-memory or local datasets.
- "tensor_db" - Performant, fully-hosted Managed Tensor Database.
Responsible for storage and query execution. Only available
for data stored in the Deep Lake Managed Database. To store
datasets in this database, specify
`runtime = {"db_engine": True}` during dataset creation.
**kwargs: Additional keyword arguments
Returns:
List of Documents selected by maximal marginal relevance.
Raises:
ValueError: when MMR search is on but embedding function is
not specified.
"""
embedding_function = kwargs.get("embedding") or self._embedding_function
if embedding_function is None:
raise ValueError(
"For MMR search, you must specify an embedding function on"
" `creation` or during add call."
)
return self._search(
query=query,
k=k,
fetch_k=fetch_k,
use_maximal_marginal_relevance=True,
lambda_mult=lambda_mult,
exec_option=exec_option,
embedding_function=embedding_function, # type: ignore
**kwargs,
)
[docs] @classmethod
def from_texts(
cls,
texts: List[str],
embedding: Optional[Embeddings] = None,
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
dataset_path: str = _LANGCHAIN_DEFAULT_DEEPLAKE_PATH,
**kwargs: Any,
) -> DeepLake:
"""Create a Deep Lake dataset from a raw documents.
If a dataset_path is specified, the dataset will be persisted in that location,
otherwise by default at `./deeplake`
Examples:
>>> # Search using an embedding
>>> vector_store = DeepLake.from_texts(
... texts = <the_texts_that_you_want_to_embed>,
... embedding_function = <embedding_function_for_query>,
... k = <number_of_items_to_return>,
... exec_option = <preferred_exec_option>,
... )
Args:
dataset_path (str): - The full path to the dataset. Can be:
- Deep Lake cloud path of the form ``hub://username/dataset_name``.
To write to Deep Lake cloud datasets,
ensure that you are logged in to Deep Lake
(use 'activeloop login' from command line)
- AWS S3 path of the form ``s3://bucketname/path/to/dataset``.
Credentials are required in either the environment
- Google Cloud Storage path of the form
``gcs://bucketname/path/to/dataset`` Credentials are required
in either the environment
- Local file system path of the form ``./path/to/dataset`` or
``~/path/to/dataset`` or ``path/to/dataset``.
- In-memory path of the form ``mem://path/to/dataset`` which doesn't
save the dataset, but keeps it in memory instead.
Should be used only for testing as it does not persist.
texts (List[str]): List of texts to add.
embedding (Optional[Embeddings]): Embedding function. Defaults to None.
Note, in other places, it is called embedding_function.
metadatas (Optional[List[dict]]): List of metadatas. Defaults to None.
ids (Optional[List[str]]): List of document IDs. Defaults to None.
**kwargs: Additional keyword arguments.
Returns:
DeepLake: Deep Lake dataset.
Raises:
ValueError: If 'embedding' is provided in kwargs. This is deprecated,
please use `embedding_function` instead.
"""
if kwargs.get("embedding"):
raise ValueError(
"using embedding as embedidng_functions is deprecated. "
"Please use `embedding_function` instead."
)
deeplake_dataset = cls(
dataset_path=dataset_path, embedding_function=embedding, **kwargs
)
deeplake_dataset.add_texts(
texts=texts,
metadatas=metadatas,
ids=ids,
embedding_function=embedding.embed_documents, # type: ignore
)
return deeplake_dataset
[docs] def delete(
self,
ids: Optional[List[str]] = None,
filter: Optional[Dict[str, str]] = None,
delete_all: Optional[bool] = None,
) -> bool:
"""Delete the entities in the dataset.
Args:
ids (Optional[List[str]], optional): The document_ids to delete.
Defaults to None.
filter (Optional[Dict[str, str]], optional): The filter to delete by.
Defaults to None.
delete_all (Optional[bool], optional): Whether to drop the dataset.
Defaults to None.
Returns:
bool: Whether the delete operation was successful.
"""
self.vectorstore.delete(
ids=ids,
filter=filter,
delete_all=delete_all,
)
return True
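For illustration, a sketch of the two deletion modes (the ids are placeholders, e.g. values returned earlier by `add_texts`):

    # Delete selected documents by id (ids are placeholders).
    db.delete(ids=["doc-id-1", "doc-id-2"])

    # Or drop the whole dataset.
    db.delete(delete_all=True)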
[docs] @classmethod
def force_delete_by_path(cls, path: str) -> None:
"""Force delete dataset by path.
Args:
path (str): path of the dataset to delete.
Raises:
ValueError: if deeplake is not installed.
"""
try:
import deeplake
except ImportError:
raise ValueError(
"Could not import deeplake python package. "
"Please install it with `pip install deeplake`."
)
deeplake.delete(path, large_ok=True, force=True)
[docs] def delete_dataset(self) -> None:
"""Delete the collection."""
self.delete(delete_all=True)
Source code for langchain.vectorstores.myscale
"""Wrapper around MyScale vector database."""
from __future__ import annotations
import json
import logging
from hashlib import sha1
from threading import Thread
from typing import Any, Dict, Iterable, List, Optional, Tuple
from pydantic import BaseSettings
from langchain.docstore.document import Document
from langchain.embeddings.base import Embeddings
from langchain.vectorstores.base import VectorStore
logger = logging.getLogger()
[docs]def has_mul_sub_str(s: str, *args: Any) -> bool:
"""
Check if a string contains multiple substrings.
Args:
s: string to check.
*args: substrings to check.
Returns:
True if all substrings are in the string, False otherwise.
"""
for a in args:
if a not in s:
return False
return True
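For example:

    # True: both substrings occur in the string.
    assert has_mul_sub_str("hub://user/dataset", "hub://", "dataset")
    # False: "s3://" does not occur.
    assert not has_mul_sub_str("hub://user/dataset", "s3://", "dataset")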
[docs]class MyScaleSettings(BaseSettings):
"""MyScale Client Configuration
Attributes:
myscale_host (str) : A URL to connect to MyScale backend.
Defaults to 'localhost'.
myscale_port (int) : URL port to connect with HTTP. Defaults to 8443.
username (str) : Username to login. Defaults to None.
password (str) : Password to login. Defaults to None.
index_type (str): index type string.
index_param (dict): index build parameter.
database (str) : Database name to find the table. Defaults to 'default'.
table (str) : Table name to operate on.
Defaults to 'vector_table'.
metric (str) : Metric to compute distance,
supported are ('l2', 'cosine', 'ip'). Defaults to 'cosine'.
column_map (Dict) : Column type map to project column name onto langchain
semantics. Must have keys: `text`, `id`, `vector`,
and must be the same size as the number of columns. For example:
.. code-block:: python
{
'id': 'text_id',
'vector': 'text_embedding',
'text': 'text_plain',
'metadata': 'metadata_dictionary_in_json',
}
Defaults to identity map.
"""
host: str = "localhost"
port: int = 8443
username: Optional[str] = None
password: Optional[str] = None
index_type: str = "IVFFLAT"
index_param: Optional[Dict[str, str]] = None
column_map: Dict[str, str] = {
"id": "id",
"text": "text",
"vector": "vector",
"metadata": "metadata",
}
database: str = "default"
table: str = "langchain"
metric: str = "cosine"
def __getitem__(self, item: str) -> Any:
return getattr(self, item)
[docs] class Config:
env_file = ".env"
env_prefix = "myscale_"
env_file_encoding = "utf-8"
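Because this is a pydantic `BaseSettings`, values can come from constructor keywords, a `.env` file, or `MYSCALE_*`-prefixed environment variables. A sketch with placeholder host and credentials:

    import os

    # Explicit construction (host and credentials are placeholders).
    settings = MyScaleSettings(
        host="msc-example.aws.myscale.com",
        port=8443,
        username="user",
        password="secret",
    )

    # Equivalent via environment variables, thanks to env_prefix = "myscale_".
    os.environ["MYSCALE_HOST"] = "msc-example.aws.myscale.com"
    settings_from_env = MyScaleSettings()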
[docs]class MyScale(VectorStore):
"""Wrapper around MyScale vector database
You need a `clickhouse-connect` python package, and a valid account
to connect to MyScale.
MyScale can not only search with simple vector indexes,
it also supports complex query with multiple conditions,
constraints and even sub-queries.
For more information, please visit
[myscale official site](https://docs.myscale.com/en/overview/)
"""
def __init__(
self,
embedding: Embeddings,
config: Optional[MyScaleSettings] = None,
**kwargs: Any,
) -> None:
"""MyScale Wrapper to LangChain
embedding_function (Embeddings):
config (MyScaleSettings): Configuration to MyScale Client
Other keyword arguments will pass into
[clickhouse-connect](https://docs.myscale.com/)
"""
try:
from clickhouse_connect import get_client
except ImportError:
raise ValueError(
"Could not import clickhouse connect python package. "
"Please install it with `pip install clickhouse-connect`."
)
try:
from tqdm import tqdm
self.pgbar = tqdm
except ImportError:
# Fallback if tqdm is not installed; accept and ignore tqdm-style
# kwargs such as `desc` and `total` used below.
self.pgbar = lambda x, **kwargs: x
super().__init__()
if config is not None:
self.config = config
else:
self.config = MyScaleSettings()
assert self.config
assert self.config.host and self.config.port
assert (
self.config.column_map
and self.config.database
and self.config.table
and self.config.metric
)
for k in ["id", "vector", "text", "metadata"]:
assert k in self.config.column_map
assert self.config.metric in ["ip", "cosine", "l2"]
# initialize the schema
dim = len(embedding.embed_query("try this out"))
index_params = (
", " + ",".join([f"'{k}={v}'" for k, v in self.config.index_param.items()])
if self.config.index_param
else ""
)
schema_ = f"""
CREATE TABLE IF NOT EXISTS {self.config.database}.{self.config.table}(
{self.config.column_map['id']} String,
{self.config.column_map['text']} String,
{self.config.column_map['vector']} Array(Float32),
{self.config.column_map['metadata']} JSON,
CONSTRAINT cons_vec_len CHECK length(\
{self.config.column_map['vector']}) = {dim},
VECTOR INDEX vidx {self.config.column_map['vector']} \
TYPE {self.config.index_type}(\
'metric_type={self.config.metric}'{index_params})
) ENGINE = MergeTree ORDER BY {self.config.column_map['id']}
"""
self.dim = dim
self.BS = "\\"
self.must_escape = ("\\", "'")
self.embedding_function = embedding.embed_query
self.dist_order = "ASC" if self.config.metric in ["cosine", "l2"] else "DESC"
# Create a connection to myscale
self.client = get_client(
host=self.config.host,
port=self.config.port,
username=self.config.username,
password=self.config.password,
**kwargs,
)
self.client.command("SET allow_experimental_object_type=1")
self.client.command(schema_)
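Putting the pieces together, a construction sketch (assumes a reachable MyScale backend and configured OpenAI credentials, both assumptions; constructing the store issues the `CREATE TABLE` above):

    from langchain.embeddings.openai import OpenAIEmbeddings

    # `settings` as in the MyScaleSettings sketch above.
    store = MyScale(embedding=OpenAIEmbeddings(), config=settings)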
[docs] def escape_str(self, value: str) -> str:
return "".join(f"{self.BS}{c}" if c in self.must_escape else c for c in value)
def _build_istr(self, transac: Iterable, column_names: Iterable[str]) -> str:
ks = ",".join(column_names)
_data = []
for n in transac:
n = ",".join([f"'{self.escape_str(str(_n))}'" for _n in n])
_data.append(f"({n})")
i_str = f"""
INSERT INTO TABLE
{self.config.database}.{self.config.table}({ks})
VALUES
{','.join(_data)}
"""
return i_str
def _insert(self, transac: Iterable, column_names: Iterable[str]) -> None:
_i_str = self._build_istr(transac, column_names)
self.client.command(_i_str)
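For reference, with the default database/table and an identity column map, `_build_istr` produces SQL along these lines (illustrative values only):

    # _build_istr([("id-1", "hello")], ["id", "text"]) yields roughly:
    #
    #   INSERT INTO TABLE
    #       default.langchain(id,text)
    #   VALUES
    #       ('id-1','hello')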
[docs] def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
batch_size: int = 32,
ids: Optional[Iterable[str]] = None,
**kwargs: Any,
) -> List[str]:
"""Run more texts through the embeddings and add to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
ids: Optional list of ids to associate with the texts.
batch_size: Batch size of insertion.
metadatas: Optional list of metadata dicts to attach to the texts.
Returns:
List of ids from adding the texts into the vectorstore.
"""
# Embed and create the documents
ids = ids or [sha1(t.encode("utf-8")).hexdigest() for t in texts]
colmap_ = self.config.column_map
transac = []
column_names = {
colmap_["id"]: ids,
colmap_["text"]: texts,
colmap_["vector"]: map(self.embedding_function, texts),
}
metadatas = metadatas or [{} for _ in texts]
column_names[colmap_["metadata"]] = map(json.dumps, metadatas)
assert len(set(colmap_) - set(column_names)) >= 0
keys, values = zip(*column_names.items())
try:
t = None
for v in self.pgbar(
zip(*values), desc="Inserting data...", total=len(metadatas)
):
assert len(v[keys.index(self.config.column_map["vector"])]) == self.dim
transac.append(v)
if len(transac) == batch_size:
if t:
t.join()
t = Thread(target=self._insert, args=[transac, keys])
t.start()
transac = []
if len(transac) > 0:
if t:
t.join()
self._insert(transac, keys)
return list(ids)
except Exception as e:
logger.error(f"\033[91m\033[1m{type(e)}\033[0m \033[95m{str(e)}\033[0m")
return []
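A usage sketch (texts and metadata are placeholders; when `ids` is omitted, each id defaults to the SHA-1 hex digest of the text):

    ids = store.add_texts(
        texts=["first document", "second document"],
        metadatas=[{"source": "a.txt"}, {"source": "b.txt"}],
        batch_size=32,
    )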
[docs] @classmethod
def from_texts(
cls,
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[Dict[Any, Any]]] = None,
config: Optional[MyScaleSettings] = None,
text_ids: Optional[Iterable[str]] = None,
batch_size: int = 32,
**kwargs: Any,
) -> MyScale:
"""Create Myscale wrapper with existing texts
Args:
embedding_function (Embeddings): Function to extract text embedding
texts (Iterable[str]): List or tuple of strings to be added
config (MyScaleSettings, Optional): Myscale configuration
text_ids (Optional[Iterable], optional): IDs for the texts.
Defaults to None.
batch_size (int, optional): Batchsize when transmitting data to MyScale.
Defaults to 32.
metadata (List[dict], optional): metadata to texts. Defaults to None.
Other keyword arguments will pass into
[clickhouse-connect](https://clickhouse.com/docs/en/integrations/python#clickhouse-connect-driver-api)
Returns:
MyScale Index
"""
ctx = cls(embedding, config, **kwargs)
ctx.add_texts(texts, ids=text_ids, batch_size=batch_size, metadatas=metadatas)
return ctx
def __repr__(self) -> str:
"""Text representation for myscale, prints backends, username and schemas.
Easy to use with `str(Myscale())`
Returns:
repr: string to show connection info and data schema
"""
_repr = f"\033[92m\033[1m{self.config.database}.{self.config.table} @ "
_repr += f"{self.config.host}:{self.config.port}\033[0m\n\n"
_repr += f"\033[1musername: {self.config.username}\033[0m\n\nTable Schema:\n"
_repr += "-" * 51 + "\n"
for r in self.client.query(
f"DESC {self.config.database}.{self.config.table}"
).named_results():
_repr += (
f"|\033[94m{r['name']:24s}\033[0m|\033[96m{r['type']:24s}\033[0m|\n"
)
_repr += "-" * 51 + "\n"
return _repr
def _build_qstr(
self, q_emb: List[float], topk: int, where_str: Optional[str] = None
) -> str:
q_emb_str = ",".join(map(str, q_emb))
if where_str:
where_str = f"PREWHERE {where_str}"
else:
where_str = ""
q_str = f"""
SELECT {self.config.column_map['text']},
{self.config.column_map['metadata']}, dist
FROM {self.config.database}.{self.config.table}
{where_str}
ORDER BY distance({self.config.column_map['vector']}, [{q_emb_str}])
AS dist {self.dist_order}
LIMIT {topk}
"""
return q_str
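With the default column map, the generated query looks roughly like this (the embedding and filter values are illustrative):

    # _build_qstr([0.1, 0.2], topk=4, where_str="metadata.source = 'a.txt'")
    # yields roughly:
    #
    #   SELECT text, metadata, dist
    #   FROM default.langchain
    #   PREWHERE metadata.source = 'a.txt'
    #   ORDER BY distance(vector, [0.1,0.2]) AS dist ASC
    #   LIMIT 4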
[docs] def similarity_search(
self, query: str, k: int = 4, where_str: Optional[str] = None, **kwargs: Any
) -> List[Document]:
"""Perform a similarity search with MyScale
Args:
query (str): query string
k (int, optional): Top K neighbors to retrieve. Defaults to 4.
where_str (Optional[str], optional): where condition string.
Defaults to None.
NOTE: Please do not let end users fill this in, and always
be aware of SQL injection. When dealing with metadatas,
remember to use `{self.metadata_column}.attribute` instead
of `attribute` alone. The default name for it is `metadata`.
Returns:
List[Document]: List of Documents
"""
return self.similarity_search_by_vector(
self.embedding_function(query), k, where_str, **kwargs
)
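A filtered-search sketch; the filter string is illustrative and, per the note above, must be built server-side, never from end-user input:

    docs = store.similarity_search(
        "how do vector indexes work?",
        k=4,
        where_str=f"{store.metadata_column}.source = 'a.txt'",
    )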
[docs] def similarity_search_by_vector(
self,
embedding: List[float],
k: int = 4,
where_str: Optional[str] = None,
**kwargs: Any,
) -> List[Document]:
"""Perform a similarity search with MyScale by vectors
Args:
embedding (List[float]): Query embedding vector.
k (int, optional): Top K neighbors to retrieve. Defaults to 4.
where_str (Optional[str], optional): where condition string.
Defaults to None.
NOTE: Please do not let end users fill this in, and always
be aware of SQL injection. When dealing with metadatas,
remember to use `{self.metadata_column}.attribute` instead
of `attribute` alone. The default name for it is `metadata`.
Returns:
List[Document]: List of Documents most similar to the embedding.
"""
q_str = self._build_qstr(embedding, k, where_str)
try:
return [
Document(
page_content=r[self.config.column_map["text"]],
metadata=r[self.config.column_map["metadata"]],
)
for r in self.client.query(q_str).named_results()
]
except Exception as e:
logger.error(f"\033[91m\033[1m{type(e)}\033[0m \033[95m{str(e)}\033[0m")
return []
[docs] def similarity_search_with_relevance_scores(
self, query: str, k: int = 4, where_str: Optional[str] = None, **kwargs: Any
) -> List[Tuple[Document, float]]:
"""Perform a similarity search with MyScale
Args:
query (str): query string
k (int, optional): Top K neighbors to retrieve. Defaults to 4.
where_str (Optional[str], optional): where condition string.
Defaults to None.
NOTE: Please do not let end users fill this in, and always
be aware of SQL injection. When dealing with metadatas,
remember to use `{self.metadata_column}.attribute` instead
of `attribute` alone. The default name for it is `metadata`.
Returns:
List[Tuple[Document, float]]: List of documents most similar to the
query text, each paired with its distance score.
A lower score represents greater similarity.
"""
q_str = self._build_qstr(self.embedding_function(query), k, where_str)
try:
return [
(
Document(
page_content=r[self.config.column_map["text"]],
metadata=r[self.config.column_map["metadata"]],
),
r["dist"],
)
for r in self.client.query(q_str).named_results()
]
except Exception as e:
logger.error(f"\033[91m\033[1m{type(e)}\033[0m \033[95m{str(e)}\033[0m")
return []
[docs] def drop(self) -> None:
"""
Helper function: Drop data
"""
self.client.command(
f"DROP TABLE IF EXISTS {self.config.database}.{self.config.table}"
)
@property
def metadata_column(self) -> str:
return self.config.column_map["metadata"]
Source code for langchain.vectorstores.analyticdb
"""VectorStore wrapper around a Postgres/PGVector database."""
from __future__ import annotations
import logging
import uuid
from typing import Any, Dict, Iterable, List, Optional, Sequence, Tuple, Type
from sqlalchemy import REAL, Column, String, Table, create_engine, insert, text
from sqlalchemy.dialects.postgresql import ARRAY, JSON, TEXT
try:
from sqlalchemy.orm import declarative_base
except ImportError:
from sqlalchemy.ext.declarative import declarative_base
from langchain.docstore.document import Document
from langchain.embeddings.base import Embeddings
from langchain.utils import get_from_dict_or_env
from langchain.vectorstores.base import VectorStore
_LANGCHAIN_DEFAULT_EMBEDDING_DIM = 1536
_LANGCHAIN_DEFAULT_COLLECTION_NAME = "langchain_document"
Base = declarative_base() # type: Any
[docs]class AnalyticDB(VectorStore):
"""VectorStore implementation using AnalyticDB.
AnalyticDB is a distributed, cloud-native database with full PostgreSQL syntax support.
- `connection_string` is a postgres connection string.
- `embedding_function` any embedding function implementing
`langchain.embeddings.base.Embeddings` interface.
- `collection_name` is the name of the collection to use.
(default: langchain_document)
- NOTE: This is not the name of the table, but the name of the collection.
The tables will be created when initializing the store (if they do not
exist), so make sure the user has the right permissions to create tables.
- `pre_delete_collection` if True, will delete the collection if it exists.
(default: False)
- Useful for testing.
"""
def __init__(
self,
connection_string: str,
embedding_function: Embeddings,
embedding_dimension: int = _LANGCHAIN_DEFAULT_EMBEDDING_DIM,
collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME,
pre_delete_collection: bool = False,
logger: Optional[logging.Logger] = None,
) -> None:
self.connection_string = connection_string
self.embedding_function = embedding_function
self.embedding_dimension = embedding_dimension
self.collection_name = collection_name
self.pre_delete_collection = pre_delete_collection
self.logger = logger or logging.getLogger(__name__)
self.__post_init__()
def __post_init__(
self,
) -> None:
"""
Initialize the store.
"""
self.engine = create_engine(self.connection_string)
self.create_collection()
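A construction sketch (the connection string and embedding class are placeholders, not part of the source; connecting creates the table and ANN index if missing):

    from langchain.embeddings.openai import OpenAIEmbeddings

    CONNECTION_STRING = "postgresql+psycopg2://user:password@host:5432/postgres"
    vector_store = AnalyticDB(
        connection_string=CONNECTION_STRING,
        embedding_function=OpenAIEmbeddings(),
    )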
[docs] def create_table_if_not_exists(self) -> None:
# Define the dynamic table
Table(
self.collection_name,
Base.metadata,
Column("id", TEXT, primary_key=True, default=uuid.uuid4),
Column("embedding", ARRAY(REAL)),
Column("document", String, nullable=True),
Column("metadata", JSON, nullable=True),
extend_existing=True,
)
with self.engine.connect() as conn:
with conn.begin():
# Create the table
Base.metadata.create_all(conn)
# Check if the index exists
index_name = f"{self.collection_name}_embedding_idx"
index_query = text(
f"""
SELECT 1
FROM pg_indexes
WHERE indexname = '{index_name}';
"""
)
result = conn.execute(index_query).scalar()
# Create the index if it doesn't exist
if not result:
index_statement = text(
f"""
CREATE INDEX {index_name}
ON {self.collection_name} USING ann(embedding)
WITH (
"dim" = {self.embedding_dimension},
"hnsw_m" = 100
);
"""
)
conn.execute(index_statement)
[docs] def create_collection(self) -> None:
if self.pre_delete_collection:
self.delete_collection()
self.create_table_if_not_exists()
[docs] def delete_collection(self) -> None:
self.logger.debug("Trying to delete collection")
drop_statement = text(f"DROP TABLE IF EXISTS {self.collection_name};")
with self.engine.connect() as conn:
with conn.begin():
conn.execute(drop_statement)
[docs] def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
batch_size: int = 500,
**kwargs: Any,
) -> List[str]:
"""Run more texts through the embeddings and add to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
kwargs: vectorstore specific parameters
Returns:
List of ids from adding the texts into the vectorstore.
"""
if ids is None:
ids = [str(uuid.uuid1()) for _ in texts]
embeddings = self.embedding_function.embed_documents(list(texts))
if not metadatas:
metadatas = [{} for _ in texts]
# Define the table schema
chunks_table = Table(
self.collection_name,
Base.metadata,
Column("id", TEXT, primary_key=True),
Column("embedding", ARRAY(REAL)),
Column("document", String, nullable=True),
Column("metadata", JSON, nullable=True),
extend_existing=True,
)
chunks_table_data = []
with self.engine.connect() as conn:
with conn.begin():
for document, metadata, chunk_id, embedding in zip(
texts, metadatas, ids, embeddings
):
chunks_table_data.append(
{
"id": chunk_id,
"embedding": embedding,
"document": document,
"metadata": metadata,
}
)
# Execute the batch insert when the batch size is reached
if len(chunks_table_data) == batch_size:
conn.execute(insert(chunks_table).values(chunks_table_data))
# Clear the chunks_table_data list for the next batch
chunks_table_data.clear()
# Insert any remaining records that didn't make up a full batch
if chunks_table_data:
conn.execute(insert(chunks_table).values(chunks_table_data))
return ids
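Usage sketch, continuing from the construction example above (texts and metadata are placeholders):

    ids = vector_store.add_texts(
        texts=["alpha", "beta"],
        metadatas=[{"source": "a.txt"}, {"source": "b.txt"}],
    )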
[docs] def similarity_search(
self,
query: str,
k: int = 4,
filter: Optional[dict] = None,
**kwargs: Any,
) -> List[Document]:
"""Run similarity search with AnalyticDB with distance.
Args:
query (str): Query text to search for.
k (int): Number of results to return. Defaults to 4.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
Returns:
List of Documents most similar to the query.
"""
embedding = self.embedding_function.embed_query(text=query)
return self.similarity_search_by_vector(
embedding=embedding,
k=k,
filter=filter,
)
[docs] def similarity_search_with_score(
self,
query: str,
k: int = 4,
filter: Optional[dict] = None,
) -> List[Tuple[Document, float]]:
"""Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
Returns:
List of Documents most similar to the query and score for each
"""
embedding = self.embedding_function.embed_query(query)
docs = self.similarity_search_with_score_by_vector(
embedding=embedding, k=k, filter=filter
)
return docs
def _similarity_search_with_relevance_scores(
self,
query: str,
k: int = 4,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""Return docs and relevance scores in the range [0, 1].
0 is dissimilar, 1 is most similar.
Args:
query: input text
k: Number of Documents to return. Defaults to 4.
**kwargs: kwargs to be passed to similarity search. Should include:
score_threshold: Optional, a floating point value between 0 and 1 to
filter the resulting set of retrieved docs
Returns:
List of Tuples of (doc, similarity_score)
"""
return self.similarity_search_with_score(query, k, **kwargs)
[docs] def similarity_search_with_score_by_vector(
self,
embedding: List[float],
k: int = 4,
filter: Optional[dict] = None,
) -> List[Tuple[Document, float]]:
# Add the filter if provided
try:
from sqlalchemy.engine import Row
except ImportError:
raise ImportError(
"Could not import Row from sqlalchemy.engine. "
"Please 'pip install sqlalchemy>=1.4'."
)
filter_condition = ""
if filter is not None:
conditions = [
f"metadata->>{key!r} = {value!r}" for key, value in filter.items()
]
filter_condition = f"WHERE {' AND '.join(conditions)}"
# Define the base query
sql_query = f"""
SELECT *, l2_distance(embedding, :embedding) as distance
FROM {self.collection_name}
{filter_condition}
ORDER BY embedding <-> :embedding
LIMIT :k
"""
# Set up the query parameters
params = {"embedding": embedding, "k": k}
# Execute the query and fetch the results
with self.engine.connect() as conn:
results: Sequence[Row] = conn.execute(text(sql_query), params).fetchall()
documents_with_scores = [
(
Document(
page_content=result.document,
|
https://api.python.langchain.com/en/latest/_modules/langchain/vectorstores/analyticdb.html
|
27c778c1385a-6
|
(
Document(
page_content=result.document,
metadata=result.metadata,
),
result.distance if self.embedding_function is not None else None,
)
for result in results
]
return documents_with_scores
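Each `filter` entry becomes a `metadata->>'key' = 'value'` condition, ANDed together. A sketch (the query text and filter value are placeholders):

    # filter={"source": "a.txt"} translates to:
    #   WHERE metadata->>'source' = 'a.txt'
    docs_and_scores = vector_store.similarity_search_with_score(
        "query text", k=4, filter={"source": "a.txt"}
    )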
[docs] def similarity_search_by_vector(
self,
embedding: List[float],
k: int = 4,
filter: Optional[dict] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs most similar to embedding vector.
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
Returns:
List of Documents most similar to the query vector.
"""
docs_and_scores = self.similarity_search_with_score_by_vector(
embedding=embedding, k=k, filter=filter
)
return [doc for doc, _ in docs_and_scores]
[docs] @classmethod
def from_texts(
cls: Type[AnalyticDB],
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
embedding_dimension: int = _LANGCHAIN_DEFAULT_EMBEDDING_DIM,
collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME,
ids: Optional[List[str]] = None,
pre_delete_collection: bool = False,
**kwargs: Any,
) -> AnalyticDB:
"""
Return VectorStore initialized from texts and embeddings.
A Postgres connection string is required:
either pass it as a parameter,
or set the PG_CONNECTION_STRING environment variable.
"""