id stringlengths 14 16 | text stringlengths 44 2.73k | source stringlengths 49 114 |
|---|---|---|
1ae7f74e8fd6-6 | 2. Initializes the Qdrant database as an in-memory docstore by default
(and overridable to a remote docstore)
3. Adds the text embeddings to the Qdrant database
This is intended to be a quick way to get started.
Example:
.. code-block:: python
from ... | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/qdrant.html |
1ae7f74e8fd6-7 | ),
)
# Now generate the embeddings for all the texts
embeddings = embedding.embed_documents(texts)
client.upsert(
collection_name=collection_name,
points=rest.Batch.construct(
ids=[md5(text.encode("utf-8")).hexdigest() for text in texts],
... | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/qdrant.html |
1ae7f74e8fd6-8 | metadata_payload_key: str,
) -> Document:
return Document(
page_content=scored_point.payload.get(content_payload_key),
metadata=scored_point.payload.get(metadata_payload_key) or {},
)
def _qdrant_filter_from_dict(self, filter: Optional[MetadataFilter]) -> Any:
if ... | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/qdrant.html |
95f2afed514f-0 | Source code for langchain.vectorstores.supabase
from __future__ import annotations
from itertools import repeat
from typing import (
TYPE_CHECKING,
Any,
Iterable,
List,
Optional,
Tuple,
Type,
Union,
)
import numpy as np
from langchain.docstore.document import Document
from langchain.embe... | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/supabase.html |
95f2afed514f-1 | embedding: Embeddings,
table_name: str,
query_name: Union[str, None] = None,
) -> None:
"""Initialize with supabase client."""
try:
import supabase # noqa: F401
except ImportError:
raise ValueError(
"Could not import supabase python pa... | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/supabase.html |
95f2afed514f-2 | if not table_name:
raise ValueError("Supabase document table_name is required.")
embeddings = embedding.embed_documents(texts)
docs = cls._texts_to_documents(texts, metadatas)
_ids = cls._add_vectors(client, table_name, embeddings, docs)
return cls(
client=client,... | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/supabase.html |
95f2afed514f-3 | self, query: List[float], k: int
) -> List[Tuple[Document, float]]:
match_documents_params = dict(query_embedding=query, match_count=k)
res = self._client.rpc(self.query_name, match_documents_params).execute()
match_result = [
(
Document(
metad... | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/supabase.html |
95f2afed514f-4 | metadatas: Optional[Iterable[dict[Any, Any]]] = None,
) -> List[Document]:
"""Return list of Documents from list of texts and metadatas."""
if metadatas is None:
metadatas = repeat({})
docs = [
Document(page_content=text, metadata=metadata)
for text, metad... | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/supabase.html |
95f2afed514f-5 | return id_list
[docs] def max_marginal_relevance_search_by_vector(
self,
embedding: List[float],
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
**kwargs: Any,
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance.
... | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/supabase.html |
95f2afed514f-6 | k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
**kwargs: Any,
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args... | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/supabase.html |
95f2afed514f-7 | $$;```
"""
embedding = self._embedding.embed_documents([query])
docs = self.max_marginal_relevance_search_by_vector(
embedding[0], k, fetch_k, lambda_mult=lambda_mult
)
return docs
By Harrison Chase
© Copyright 2023, Harrison Chase.
Last updated on... | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/supabase.html |
a2713e9c0aa0-0 | Source code for langchain.vectorstores.chroma
"""Wrapper around ChromaDB embeddings platform."""
from __future__ import annotations
import logging
import uuid
from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Tuple, Type
import numpy as np
from langchain.docstore.document import Document
from langc... | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/chroma.html |
a2713e9c0aa0-1 | """
_LANGCHAIN_DEFAULT_COLLECTION_NAME = "langchain"
def __init__(
self,
collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME,
embedding_function: Optional[Embeddings] = None,
persist_directory: Optional[str] = None,
client_settings: Optional[chromadb.config.Settings... | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/chroma.html |
a2713e9c0aa0-2 | self,
query_texts: Optional[List[str]] = None,
query_embeddings: Optional[List[List[float]]] = None,
n_results: int = 4,
where: Optional[Dict[str, str]] = None,
) -> List[Document]:
"""Query the chroma collection."""
for i in range(n_results, 0, -1):
try:
... | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/chroma.html |
a2713e9c0aa0-3 | ids = [str(uuid.uuid1()) for _ in texts]
embeddings = None
if self._embedding_function is not None:
embeddings = self._embedding_function.embed_documents(list(texts))
self._collection.add(
metadatas=metadatas, embeddings=embeddings, documents=texts, ids=ids
)
... | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/chroma.html |
a2713e9c0aa0-4 | """
results = self.__query_collection(
query_embeddings=embedding, n_results=k, where=filter
)
return _results_to_docs(results)
[docs] def similarity_search_with_score(
self,
query: str,
k: int = 4,
filter: Optional[Dict[str, str]] = None,
*... | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/chroma.html |
a2713e9c0aa0-5 | ) -> List[Document]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documen... | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/chroma.html |
a2713e9c0aa0-6 | **kwargs: Any,
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
query: Text to look up documents similar to.
k: Number ... | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/chroma.html |
a2713e9c0aa0-7 | "creation to persist the collection."
)
self._client.persist()
[docs] def update_document(self, document_id: str, document: Document) -> None:
"""Update a document in the collection.
Args:
document_id (str): ID of the document to update.
document (Document)... | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/chroma.html |
a2713e9c0aa0-8 | ids (Optional[List[str]]): List of document IDs. Defaults to None.
client_settings (Optional[chromadb.config.Settings]): Chroma client settings
Returns:
Chroma: Chroma vectorstore.
"""
chroma_collection = cls(
collection_name=collection_name,
embed... | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/chroma.html |
a2713e9c0aa0-9 | client_settings (Optional[chromadb.config.Settings]): Chroma client settings
Returns:
Chroma: Chroma vectorstore.
"""
texts = [doc.page_content for doc in documents]
metadatas = [doc.metadata for doc in documents]
return cls.from_texts(
texts=texts,
... | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/chroma.html |
2af6a0e52863-0 | Source code for langchain.vectorstores.atlas
"""Wrapper around Atlas by Nomic."""
from __future__ import annotations
import logging
import uuid
from typing import Any, Iterable, List, Optional, Type
import numpy as np
from langchain.docstore.document import Document
from langchain.embeddings.base import Embeddings
from... | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/atlas.html |
2af6a0e52863-1 | is_public (bool): Whether your project is publicly accessible.
True by default.
reset_project_if_exists (bool): Whether to reset this project if it
already exists. Default False.
                Generally useful during development and testing.
"""
try:
... | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/atlas.html |
2af6a0e52863-2 | metadatas (Optional[List[dict]], optional): Optional list of metadatas.
ids (Optional[List[str]]): An optional list of ids.
refresh(bool): Whether or not to refresh indices with the updated data.
Default True.
Returns:
List[str]: List of IDs of the added texts... | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/atlas.html |
2af6a0e52863-3 | else:
if metadatas is None:
data = [
{"text": text, AtlasDB._ATLAS_DEFAULT_ID_FIELD: ids[i]}
for i, text in enumerate(texts)
]
else:
for i, text in enumerate(texts):
metadatas[i]["text"] =... | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/atlas.html |
2af6a0e52863-4 | """
if self._embedding_function is None:
raise NotImplementedError(
"AtlasDB requires an embedding_function for text similarity search!"
)
_embedding = self._embedding_function.embed_documents([query])[0]
embedding = np.array(_embedding).reshape(1, -1)
... | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/atlas.html |
2af6a0e52863-5 | ids (Optional[List[str]]): Optional list of document IDs. If None,
ids will be auto created
description (str): A description for your project.
is_public (bool): Whether your project is publicly accessible.
True by default.
reset_project_if_exists (bool... | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/atlas.html |
2af6a0e52863-6 | ids: Optional[List[str]] = None,
name: Optional[str] = None,
api_key: Optional[str] = None,
persist_directory: Optional[str] = None,
description: str = "A description for your project",
is_public: bool = True,
reset_project_if_exists: bool = False,
index_kwargs: O... | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/atlas.html |
2af6a0e52863-7 | return cls.from_texts(
name=name,
api_key=api_key,
texts=texts,
embedding=embedding,
metadatas=metadatas,
ids=ids,
description=description,
is_public=is_public,
reset_project_if_exists=reset_project_if_exists,
... | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/atlas.html |
d9df88db7060-0 | Source code for langchain.vectorstores.opensearch_vector_search
"""Wrapper around OpenSearch vector database."""
from __future__ import annotations
import uuid
from typing import Any, Dict, Iterable, List, Optional
from langchain.docstore.document import Document
from langchain.embeddings.base import Embeddings
from la... | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/opensearch_vector_search.html |
d9df88db7060-1 | f"Got error: {e} "
)
return client
def _validate_embeddings_and_bulk_size(embeddings_length: int, bulk_size: int) -> None:
"""Validate Embeddings Length and Bulk Size."""
if embeddings_length == 0:
raise RuntimeError("Embeddings size is zero")
if bulk_size < embeddings_length:
ra... | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/opensearch_vector_search.html |
d9df88db7060-2 | vector_field: str = "vector_field",
) -> Dict:
"""For Painless Scripting or Script Scoring,the default mapping to create index."""
return {
"mappings": {
"properties": {
vector_field: {"type": "knn_vector", "dimension": dim},
}
}
}
def _default_text_ma... | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/opensearch_vector_search.html |
d9df88db7060-3 | return {
"size": size,
"query": {"knn": {vector_field: {"vector": query_vector, "k": k}}},
}
def _approximate_search_query_with_boolean_filter(
query_vector: List[float],
boolean_filter: Dict,
size: int = 4,
k: int = 4,
vector_field: str = "vector_field",
subquery_clause: str... | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/opensearch_vector_search.html |
d9df88db7060-4 | """For Script Scoring Search, this is the default query."""
return {
"query": {
"script_score": {
"query": pre_filter,
"script": {
"source": "knn_score",
"lang": "knn",
"params": {
... | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/opensearch_vector_search.html |
d9df88db7060-5 | "field": vector_field,
"query_value": query_vector,
},
},
}
}
}
def _get_kwargs_value(kwargs: Any, key: str, default_value: Any) -> Any:
"""Get the value of the key if present. Else get the default_value."""
if key in kwargs:
... | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/opensearch_vector_search.html |
d9df88db7060-6 | bulk_size: Bulk API request count; Default: 500
Returns:
List of ids from adding the texts into the vectorstore.
Optional Args:
vector_field: Document field embeddings are stored in. Defaults to
"vector_field".
text_field: Document field the text of the do... | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/opensearch_vector_search.html |
d9df88db7060-7 | "metadata".
Can be set to a special value "*" to include the entire document.
Optional Args for Approximate Search:
search_type: "approximate_search"; default: "approximate_search"
size: number of results the query actually returns; default: 4
boolean_filter: A Bo... | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/opensearch_vector_search.html |
d9df88db7060-8 | metadata_field = _get_kwargs_value(kwargs, "metadata_field", "metadata")
vector_field = _get_kwargs_value(kwargs, "vector_field", "vector_field")
if search_type == "approximate_search":
size = _get_kwargs_value(kwargs, "size", 4)
boolean_filter = _get_kwargs_value(kwargs, "boolea... | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/opensearch_vector_search.html |
d9df88db7060-9 | pre_filter = _get_kwargs_value(kwargs, "pre_filter", MATCH_ALL_QUERY)
search_query = _default_painless_scripting_query(
embedding, space_type, pre_filter, vector_field
)
else:
raise ValueError("Invalid `search_type` provided as an argument")
response =... | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/opensearch_vector_search.html |
d9df88db7060-10 | vector_field: Document field embeddings are stored in. Defaults to
"vector_field".
text_field: Document field the text of the document is stored in. Defaults
to "text".
Optional Keyword Args for Approximate Search:
engine: "nmslib", "faiss", "lucene"; default: "nm... | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/opensearch_vector_search.html |
d9df88db7060-11 | _validate_embeddings_and_bulk_size(len(embeddings), bulk_size)
dim = len(embeddings[0])
# Get the index name from either from kwargs or ENV Variable
# before falling back to random generation
index_name = get_from_dict_or_env(
kwargs, "index_name", "OPENSEARCH_INDEX_NAME", de... | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/opensearch_vector_search.html |
d9df88db7060-12 | By Harrison Chase
© Copyright 2023, Harrison Chase.
Last updated on Apr 25, 2023. | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/opensearch_vector_search.html |
90d5a42111da-0 | Source code for langchain.vectorstores.analyticdb
"""VectorStore wrapper around a Postgres/PGVector database."""
from __future__ import annotations
import logging
import uuid
from typing import Any, Dict, Iterable, List, Optional, Tuple
import sqlalchemy
from sqlalchemy import REAL, Index
from sqlalchemy.dialects.postg... | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/analyticdb.html |
90d5a42111da-1 | """
created = False
collection = cls.get_by_name(session, name)
if collection:
return collection, created
collection = cls(name=name, cmetadata=cmetadata)
session.add(collection)
session.commit()
created = True
return collection, created
class ... | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/analyticdb.html |
90d5a42111da-2 | - `connection_string` is a postgres connection string.
- `embedding_function` any embedding function implementing
`langchain.embeddings.base.Embeddings` interface.
- `collection_name` is the name of the collection to use. (default: langchain)
- NOTE: This is not the name of the table, but the na... | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/analyticdb.html |
90d5a42111da-3 | return conn
[docs] def create_tables_if_not_exists(self) -> None:
Base.metadata.create_all(self._conn)
[docs] def drop_tables(self) -> None:
Base.metadata.drop_all(self._conn)
[docs] def create_collection(self) -> None:
if self.pre_delete_collection:
self.delete_collection()... | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/analyticdb.html |
90d5a42111da-4 | embeddings = self.embedding_function.embed_documents(list(texts))
if not metadatas:
metadatas = [{} for _ in texts]
with Session(self._conn) as session:
collection = self.get_collection(session)
if not collection:
raise ValueError("Collection not found... | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/analyticdb.html |
90d5a42111da-5 | """Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
Returns:
List of Documents most similar to th... | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/analyticdb.html |
90d5a42111da-6 | EmbeddingStore.collection_id == CollectionStore.uuid,
)
.limit(k)
.all()
)
docs = [
(
Document(
page_content=result.EmbeddingStore.document,
metadata=result.EmbeddingStore.cmetadata,
)... | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/analyticdb.html |
90d5a42111da-7 | Return VectorStore initialized from texts and embeddings.
Postgres connection string is required
Either pass it as a parameter
or set the PGVECTOR_CONNECTION_STRING environment variable.
"""
connection_string = cls.get_connection_string(kwargs)
store = cls(
co... | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/analyticdb.html |
90d5a42111da-8 | metadatas = [d.metadata for d in documents]
connection_string = cls.get_connection_string(kwargs)
kwargs["connection_string"] = connection_string
return cls.from_texts(
texts=texts,
pre_delete_collection=pre_delete_collection,
embedding=embedding,
... | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/analyticdb.html |
2b9cea1aac04-0 | Source code for langchain.vectorstores.faiss
"""Wrapper around FAISS vector database."""
from __future__ import annotations
import math
import pickle
import uuid
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple
import numpy as np
from langchain.docstore.base import Addabl... | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/faiss.html |
2b9cea1aac04-1 | [docs]class FAISS(VectorStore):
"""Wrapper around FAISS vector database.
To use, you should have the ``faiss`` python package installed.
Example:
.. code-block:: python
from langchain import FAISS
faiss = FAISS(embedding_function, index, docstore, index_to_docstore_id)
""... | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/faiss.html |
2b9cea1aac04-2 | starting_len = len(self.index_to_docstore_id)
self.index.add(np.array(embeddings, dtype=np.float32))
# Get list of index, id, and docs.
full_info = [
(starting_len + i, str(uuid.uuid4()), doc)
for i, doc in enumerate(documents)
]
# Add information to docst... | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/faiss.html |
2b9cea1aac04-3 | self,
text_embeddings: Iterable[Tuple[str, List[float]]],
metadatas: Optional[List[dict]] = None,
**kwargs: Any,
) -> List[str]:
"""Run more texts through the embeddings and add to the vectorstore.
Args:
text_embeddings: Iterable pairs of string and embedding to
... | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/faiss.html |
2b9cea1aac04-4 | # This happens when not enough docs are returned.
continue
_id = self.index_to_docstore_id[i]
doc = self.docstore.search(_id)
if not isinstance(doc, Document):
raise ValueError(f"Could not find document for id {_id}, got {doc}")
docs.append... | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/faiss.html |
2b9cea1aac04-5 | """Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
Returns:
List of Documents most similar to the query.
"""
docs_and_scores = self.similarity_search_with_score(quer... | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/faiss.html |
2b9cea1aac04-6 | np.array([embedding], dtype=np.float32),
embeddings,
k=k,
lambda_mult=lambda_mult,
)
selected_indices = [indices[0][i] for i in mmr_selected]
docs = []
for i in selected_indices:
if i == -1:
# This happens when not enough do... | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/faiss.html |
2b9cea1aac04-7 | embedding, k, fetch_k, lambda_mult=lambda_mult
)
return docs
[docs] def merge_from(self, target: FAISS) -> None:
"""Merge another FAISS object with the current one.
Add the target FAISS to the current one.
Args:
target: FAISS object you wish to merge into the curre... | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/faiss.html |
2b9cea1aac04-8 | ) -> FAISS:
faiss = dependable_faiss_import()
index = faiss.IndexFlatL2(len(embeddings[0]))
index.add(np.array(embeddings, dtype=np.float32))
documents = []
for i, text in enumerate(texts):
metadata = metadatas[i] if metadatas else {}
documents.append(Docu... | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/faiss.html |
2b9cea1aac04-9 | metadatas,
**kwargs,
)
[docs] @classmethod
def from_embeddings(
cls,
text_embeddings: List[Tuple[str, List[float]]],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
**kwargs: Any,
) -> FAISS:
"""Construct FAISS wrapper from ra... | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/faiss.html |
2b9cea1aac04-10 | path = Path(folder_path)
path.mkdir(exist_ok=True, parents=True)
# save index separately since it is not picklable
faiss = dependable_faiss_import()
faiss.write_index(
self.index, str(path / "{index_name}.faiss".format(index_name=index_name))
)
# save docstore... | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/faiss.html |
2b9cea1aac04-11 | self,
query: str,
k: int = 4,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""Return docs and their similarity scores on a scale from 0 to 1."""
if self.relevance_score_fn is None:
raise ValueError(
"normalize_score_fn must be provided to"
... | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/faiss.html |
46ec4d20b726-0 | Source code for langchain.vectorstores.milvus
"""Wrapper around the Milvus vector database."""
from __future__ import annotations
import logging
from typing import Any, Iterable, List, Optional, Tuple, Union
from uuid import uuid4
import numpy as np
from langchain.docstore.document import Document
from langchain.embedd... | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/milvus.html |
46ec4d20b726-1 | The connection args used for this class comes in the form of a dict,
here are a few of the options:
address (str): The actual address of Milvus
instance. Example address: "localhost:19530"
uri (str): The uri of Milvus instance. Example uri:
"http://randomw... | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/milvus.html |
46ec4d20b726-2 | Args:
embedding_function (Embeddings): Function used to embed the text.
collection_name (str): Which Milvus collection to use. Defaults to
"LangChainCollection".
connection_args (Optional[dict[str, any]]): The arguments for connection to
Milvus/Zilliz ... | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/milvus.html |
46ec4d20b726-3 | "RHNSW_SQ": {"metric_type": "L2", "params": {"ef": 10}},
"RHNSW_PQ": {"metric_type": "L2", "params": {"ef": 10}},
"IVF_HNSW": {"metric_type": "L2", "params": {"nprobe": 10, "ef": 10}},
"ANNOY": {"metric_type": "L2", "params": {"search_k": 10}},
"AUTOINDEX": {"metric_type"... | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/milvus.html |
46ec4d20b726-4 | if drop_old and isinstance(self.col, Collection):
self.col.drop()
self.col = None
# Initialize the vector store
self._init()
def _create_connection_alias(self, connection_args: dict) -> str:
"""Create the connection to the Milvus server."""
from pymilvus impor... | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/milvus.html |
46ec4d20b726-5 | and ("user" in addr)
and (addr["user"] == tmp_user)
):
logger.debug("Using previous connection: %s", con[0])
return con[0]
        # Generate a new connection if one doesn't exist
alias = uuid4().hex
try:
connections.... | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/milvus.html |
46ec4d20b726-6 | # Datatype isn't compatible
if dtype == DataType.UNKNOWN or dtype == DataType.NONE:
logger.error(
"Failure to create collection, unrecognized dtype for key: %s",
key,
)
raise ValueError(f"Unrecogni... | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/milvus.html |
46ec4d20b726-7 | schema = self.col.schema
for x in schema.fields:
self.fields.append(x.name)
# Since primary field is auto-id, no need to track it
self.fields.remove(self._primary_field)
def _get_index(self) -> Optional[dict[str, Any]]:
"""Return the vector index informati... | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/milvus.html |
46ec4d20b726-8 | using=self.alias,
)
logger.debug(
"Successfully created an index on collection: %s",
self.collection_name,
)
except MilvusException as e:
logger.error(
"Failed to create an index o... | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/milvus.html |
46ec4d20b726-9 | embedding and the columns are decided by the first metadata dict.
        Metadata keys will need to be present for all inserted values. At
the moment there is no None equivalent in Milvus.
Args:
texts (Iterable[str]): The texts to embed, it is assumed
that they all fit in memo... | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/milvus.html |
46ec4d20b726-10 | for key, value in d.items():
if key in self.fields:
insert_dict.setdefault(key, []).append(value)
# Total insert count
vectors: list = insert_dict[self._vector_field]
total_count = len(vectors)
pks: list[str] = []
assert isinstance(self... | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/milvus.html |
46ec4d20b726-11 | Defaults to None.
expr (str, optional): Filtering expression. Defaults to None.
timeout (int, optional): How long to wait before timeout error.
Defaults to None.
kwargs: Collection.search() keyword arguments.
Returns:
List[Document]: Document resul... | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/milvus.html |
46ec4d20b726-12 | return []
res = self.similarity_search_with_score_by_vector(
embedding=embedding, k=k, param=param, expr=expr, timeout=timeout, **kwargs
)
return [doc for doc, _ in res]
[docs] def similarity_search_with_score(
self,
query: str,
k: int = 4,
param: O... | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/milvus.html |
46ec4d20b726-13 | output_fields = self.fields[:]
output_fields.remove(self._vector_field)
res = self.similarity_search_with_score_by_vector(
embedding=embedding, k=k, param=param, expr=expr, timeout=timeout, **kwargs
)
return res
[docs] def similarity_search_with_score_by_vector(
se... | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/milvus.html |
46ec4d20b726-14 | # Determine result metadata fields.
output_fields = self.fields[:]
output_fields.remove(self._vector_field)
# Perform the search.
res = self.col.search(
data=[embedding],
anns_field=self._vector_field,
param=param,
limit=k,
expr... | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/milvus.html |
46ec4d20b726-15 | to maximum diversity and 1 to minimum diversity.
Defaults to 0.5
param (dict, optional): The search params for the specified index.
Defaults to None.
expr (str, optional): Filtering expression. Defaults to None.
timeout (int, optional): How lon... | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/milvus.html |
46ec4d20b726-16 | lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5
param (dict, optional): The search params for the specif... | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/milvus.html |
46ec4d20b726-17 | output_fields=[self._primary_field, self._vector_field],
timeout=timeout,
)
# Reorganize the results from query to match search order.
vectors = {x[self._primary_field]: x[self._vector_field] for x in vectors}
ordered_result_embeddings = [vectors[x] for x in ids]
# Ge... | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/milvus.html |
46ec4d20b726-18 | Defaults to None.
collection_name (str, optional): Collection name to use. Defaults to
"LangChainCollection".
connection_args (dict[str, Any], optional): Connection args to use. Defaults
to DEFAULT_MILVUS_CONNECTION.
consistency_level (str, optional): ... | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/milvus.html |
39ecc05c1952-0 | Source code for langchain.vectorstores.annoy
"""Wrapper around Annoy vector database."""
from __future__ import annotations
import os
import pickle
import uuid
from configparser import ConfigParser
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple
import numpy as np
from l... | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/annoy.html |
39ecc05c1952-1 | ):
"""Initialize with necessary components."""
self.embedding_function = embedding_function
self.index = index
self.metric = metric
self.docstore = docstore
self.index_to_docstore_id = index_to_docstore_id
[docs] def add_texts(
self,
texts: Iterable[str... | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/annoy.html |
39ecc05c1952-2 | Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
search_k: inspect up to search_k nodes which defaults
to n_trees * n if not provided
Returns:
List of Documents most similar to the query and score ... | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/annoy.html |
39ecc05c1952-3 | k: Number of Documents to return. Defaults to 4.
search_k: inspect up to search_k nodes which defaults
to n_trees * n if not provided
Returns:
List of Documents most similar to the query and score for each
"""
embedding = self.embedding_function(query)
... | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/annoy.html |
39ecc05c1952-4 | Returns:
List of Documents most similar to the embedding.
"""
docs_and_scores = self.similarity_search_with_score_by_index(
docstore_index, k, search_k
)
return [doc for doc, _ in docs_and_scores]
[docs] def similarity_search(
self, query: str, k: int =... | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/annoy.html |
39ecc05c1952-5 | of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
Returns:
List of Documents selected by maximal marginal relevance.
"""
idxs = self.index.get_nns_by_vector(
... | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/annoy.html |
39ecc05c1952-6 | k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity... | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/annoy.html |
39ecc05c1952-7 | documents = []
for i, text in enumerate(texts):
metadata = metadatas[i] if metadatas else {}
documents.append(Document(page_content=text, metadata=metadata))
index_to_id = {i: str(uuid.uuid4()) for i in range(len(documents))}
docstore = InMemoryDocstore(
{inde... | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/annoy.html |
39ecc05c1952-8 | from langchain import Annoy
from langchain.embeddings import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
index = Annoy.from_texts(texts, embeddings)
"""
embeddings = embedding.embed_documents(texts)
return cls.__from(
texts, embedd... | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/annoy.html |
39ecc05c1952-9 | embeddings = OpenAIEmbeddings()
text_embeddings = embeddings.embed_documents(texts)
text_embedding_pairs = list(zip(texts, text_embeddings))
db = Annoy.from_embeddings(text_embedding_pairs, embeddings)
"""
texts = [t[0] for t in text_embeddings]
em... | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/annoy.html |
39ecc05c1952-10 | Args:
folder_path: folder path to load index, docstore,
and index_to_docstore_id from.
embeddings: Embeddings to use when generating queries.
"""
path = Path(folder_path)
# load index separately since it is not picklable
annoy = dependable_annoy_im... | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/annoy.html |
bde48b47f63a-0 | Source code for langchain.vectorstores.weaviate
"""Wrapper around weaviate vector database."""
from __future__ import annotations
from typing import Any, Dict, Iterable, List, Optional, Type
from uuid import uuid4
import numpy as np
from langchain.docstore.document import Document
from langchain.embeddings.base import ... | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/weaviate.html |
bde48b47f63a-1 | return client
[docs]class Weaviate(VectorStore):
"""Wrapper around Weaviate vector database.
To use, you should have the ``weaviate-client`` python package installed.
Example:
.. code-block:: python
import weaviate
from langchain.vectorstores import Weaviate
clien... | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/weaviate.html |
bde48b47f63a-2 | """Upload texts with metadata (properties) to Weaviate."""
from weaviate.util import get_valid_uuid
with self._client.batch as batch:
ids = []
for i, doc in enumerate(texts):
data_properties = {
self._text_key: doc,
}
... | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/weaviate.html |
bde48b47f63a-3 | docs = []
for res in result["data"]["Get"][self._index_name]:
text = res.pop(self._text_key)
docs.append(Document(page_content=text, metadata=res))
return docs
[docs] def similarity_search_by_vector(
self, embedding: List[float], k: int = 4, **kwargs: Any
) -> List... | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/weaviate.html |
bde48b47f63a-4 | fetch_k: Number of Documents to fetch to pass to MMR algorithm.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults... | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/weaviate.html |
bde48b47f63a-5 | """
vector = {"vector": embedding}
query_obj = self._client.query.get(self._index_name, self._query_attrs)
if kwargs.get("where_filter"):
query_obj = query_obj.with_where(kwargs.get("where_filter"))
results = (
query_obj.with_additional("vector")
.with... | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/weaviate.html |
bde48b47f63a-6 | from langchain.embeddings import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
weaviate = Weaviate.from_texts(
texts,
embeddings,
weaviate_url="http://localhost:8080"
)
"""
client = _create_wea... | https://python.langchain.com/en/latest/_modules/langchain/vectorstores/weaviate.html |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.