Columns: index (int64, range 0–0); repo_id (stringclasses, 596 values); file_path (stringlengths, 31–168); content (stringlengths, 1–6.2M)
lc_public_repos/langchain/libs/community/langchain_community/query_constructors/chroma.py
from typing import Dict, Tuple, Union

from langchain_core.structured_query import (
    Comparator,
    Comparison,
    Operation,
    Operator,
    StructuredQuery,
    Visitor,
)


class ChromaTranslator(Visitor):
    """Translate `Chroma` internal query language elements to valid filters."""

    allowed_operators = [Operator.AND, Operator.OR]
    """Subset of allowed logical operators."""

    allowed_comparators = [
        Comparator.EQ,
        Comparator.NE,
        Comparator.GT,
        Comparator.GTE,
        Comparator.LT,
        Comparator.LTE,
    ]
    """Subset of allowed logical comparators."""

    def _format_func(self, func: Union[Operator, Comparator]) -> str:
        self._validate_func(func)
        return f"${func.value}"

    def visit_operation(self, operation: Operation) -> Dict:
        args = [arg.accept(self) for arg in operation.arguments]
        return {self._format_func(operation.operator): args}

    def visit_comparison(self, comparison: Comparison) -> Dict:
        return {
            comparison.attribute: {
                self._format_func(comparison.comparator): comparison.value
            }
        }

    def visit_structured_query(
        self, structured_query: StructuredQuery
    ) -> Tuple[str, dict]:
        if structured_query.filter is None:
            kwargs = {}
        else:
            kwargs = {"filter": structured_query.filter.accept(self)}
        return structured_query.query, kwargs
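A minimal usage sketch (not part of the repository; the query text and attribute names are illustrative) showing how this translator turns a StructuredQuery into a Chroma-style filter dict:

from langchain_core.structured_query import (
    Comparator, Comparison, Operation, Operator, StructuredQuery,
)

sq = StructuredQuery(
    query="dinosaur movies",  # illustrative free-text query
    filter=Operation(
        operator=Operator.AND,
        arguments=[
            Comparison(comparator=Comparator.GTE, attribute="year", value=1990),
            Comparison(comparator=Comparator.EQ, attribute="genre", value="animated"),
        ],
    ),
    limit=None,
)
query, kwargs = ChromaTranslator().visit_structured_query(sq)
# kwargs == {"filter": {"$and": [{"year": {"$gte": 1990}},
#                                {"genre": {"$eq": "animated"}}]}}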
lc_public_repos/langchain/libs/community/langchain_community/query_constructors/hanavector.py
# HANA Translator/query constructor
from typing import Dict, Tuple, Union

from langchain_core.structured_query import (
    Comparator,
    Comparison,
    Operation,
    Operator,
    StructuredQuery,
    Visitor,
)


class HanaTranslator(Visitor):
    """
    Translate internal query language elements to valid filter params for a
    HANA vectorstore.
    """

    allowed_operators = [Operator.AND, Operator.OR]
    """Subset of allowed logical operators."""

    allowed_comparators = [
        Comparator.EQ,
        Comparator.NE,
        Comparator.GT,
        Comparator.LT,
        Comparator.GTE,
        Comparator.LTE,
        Comparator.IN,
        Comparator.NIN,
        # Comparator.CONTAIN,
        Comparator.LIKE,
    ]

    def _format_func(self, func: Union[Operator, Comparator]) -> str:
        self._validate_func(func)
        return f"${func.value}"

    def visit_operation(self, operation: Operation) -> Dict:
        args = [arg.accept(self) for arg in operation.arguments]
        return {self._format_func(operation.operator): args}

    def visit_comparison(self, comparison: Comparison) -> Dict:
        return {
            comparison.attribute: {
                self._format_func(comparison.comparator): comparison.value
            }
        }

    def visit_structured_query(
        self, structured_query: StructuredQuery
    ) -> Tuple[str, dict]:
        if structured_query.filter is None:
            kwargs = {}
        else:
            kwargs = {"filter": structured_query.filter.accept(self)}
        return structured_query.query, kwargs
lc_public_repos/langchain/libs/community/langchain_community/query_constructors/databricks_vector_search.py
from collections import ChainMap
from itertools import chain
from typing import Dict, Tuple

from langchain_core.structured_query import (
    Comparator,
    Comparison,
    Operation,
    Operator,
    StructuredQuery,
    Visitor,
)

_COMPARATOR_TO_SYMBOL = {
    Comparator.EQ: "",
    Comparator.GT: " >",
    Comparator.GTE: " >=",
    Comparator.LT: " <",
    Comparator.LTE: " <=",
    Comparator.IN: "",
    Comparator.LIKE: " LIKE",
}


class DatabricksVectorSearchTranslator(Visitor):
    """Translate `Databricks vector search` internal query language elements to
    valid filters."""

    allowed_operators = [Operator.AND, Operator.NOT, Operator.OR]
    """Subset of allowed logical operators."""

    allowed_comparators = [
        Comparator.EQ,
        Comparator.GT,
        Comparator.GTE,
        Comparator.LT,
        Comparator.LTE,
        Comparator.IN,
        Comparator.LIKE,
    ]
    """Subset of allowed logical comparators."""

    def _visit_and_operation(self, operation: Operation) -> Dict:
        return dict(ChainMap(*[arg.accept(self) for arg in operation.arguments]))

    def _visit_or_operation(self, operation: Operation) -> Dict:
        filter_args = [arg.accept(self) for arg in operation.arguments]
        flattened_args = list(
            chain.from_iterable(filter_arg.items() for filter_arg in filter_args)
        )
        return {
            " OR ".join(key for key, _ in flattened_args): [
                value for _, value in flattened_args
            ]
        }

    def _visit_not_operation(self, operation: Operation) -> Dict:
        if len(operation.arguments) > 1:
            raise ValueError(
                f'"{operation.operator.value}" can have only one argument '
                f"in Databricks vector search"
            )
        filter_arg = operation.arguments[0].accept(self)
        return {
            f"{column_with_bool_expression} NOT": value
            for column_with_bool_expression, value in filter_arg.items()
        }

    def visit_operation(self, operation: Operation) -> Dict:
        self._validate_func(operation.operator)
        if operation.operator == Operator.AND:
            return self._visit_and_operation(operation)
        elif operation.operator == Operator.OR:
            return self._visit_or_operation(operation)
        elif operation.operator == Operator.NOT:
            return self._visit_not_operation(operation)
        else:
            raise NotImplementedError(
                f'Operator "{operation.operator}" is not supported'
            )

    def visit_comparison(self, comparison: Comparison) -> Dict:
        self._validate_func(comparison.comparator)
        comparator_symbol = _COMPARATOR_TO_SYMBOL[comparison.comparator]
        return {f"{comparison.attribute}{comparator_symbol}": comparison.value}

    def visit_structured_query(
        self, structured_query: StructuredQuery
    ) -> Tuple[str, dict]:
        if structured_query.filter is None:
            kwargs = {}
        else:
            kwargs = {"filter": structured_query.filter.accept(self)}
        return structured_query.query, kwargs
lc_public_repos/langchain/libs/community/langchain_community/query_constructors/tencentvectordb.py
from __future__ import annotations

from typing import Optional, Sequence, Tuple

from langchain_core.structured_query import (
    Comparator,
    Comparison,
    Operation,
    Operator,
    StructuredQuery,
    Visitor,
)


class TencentVectorDBTranslator(Visitor):
    """Translate StructuredQuery to Tencent VectorDB query."""

    COMPARATOR_MAP = {
        Comparator.EQ: "=",
        Comparator.NE: "!=",
        Comparator.GT: ">",
        Comparator.GTE: ">=",
        Comparator.LT: "<",
        Comparator.LTE: "<=",
        Comparator.IN: "in",
        Comparator.NIN: "not in",
    }

    allowed_comparators: Optional[Sequence[Comparator]] = list(COMPARATOR_MAP.keys())
    allowed_operators: Optional[Sequence[Operator]] = [
        Operator.AND,
        Operator.OR,
        Operator.NOT,
    ]

    def __init__(self, meta_keys: Optional[Sequence[str]] = None):
        """Initialize the translator.

        Args:
            meta_keys: List of meta keys to be used in the query. Default: [].
        """
        self.meta_keys = meta_keys or []

    def visit_operation(self, operation: Operation) -> str:
        """Visit an operation node and return the translated query.

        Args:
            operation: Operation node to be visited.

        Returns:
            Translated query.
        """
        if operation.operator in (Operator.AND, Operator.OR):
            ret = f" {operation.operator.value} ".join(
                [arg.accept(self) for arg in operation.arguments]
            )
            if operation.operator == Operator.OR:
                ret = f"({ret})"
            return ret
        else:
            return f"not ({operation.arguments[0].accept(self)})"

    def visit_comparison(self, comparison: Comparison) -> str:
        """Visit a comparison node and return the translated query.

        Args:
            comparison: Comparison node to be visited.

        Returns:
            Translated query.
        """
        if self.meta_keys and comparison.attribute not in self.meta_keys:
            raise ValueError(
                f"Expr Filtering found Unsupported attribute: {comparison.attribute}"
            )

        if comparison.comparator in self.COMPARATOR_MAP:
            if comparison.comparator in [Comparator.IN, Comparator.NIN]:
                # Stringify non-string members so str.join() below cannot fail.
                value = map(
                    lambda x: f'"{x}"' if isinstance(x, str) else str(x),
                    comparison.value,
                )
                return (
                    f"{comparison.attribute}"
                    f" {self.COMPARATOR_MAP[comparison.comparator]} "
                    f"({', '.join(value)})"
                )
            if isinstance(comparison.value, str):
                return (
                    f"{comparison.attribute} "
                    f"{self.COMPARATOR_MAP[comparison.comparator]}"
                    f' "{comparison.value}"'
                )
            return (
                f"{comparison.attribute}"
                f" {self.COMPARATOR_MAP[comparison.comparator]} "
                f"{comparison.value}"
            )
        else:
            raise ValueError(f"Unsupported comparator {comparison.comparator}")

    def visit_structured_query(
        self, structured_query: StructuredQuery
    ) -> Tuple[str, dict]:
        """Visit a structured query node and return the translated query.

        Args:
            structured_query: StructuredQuery node to be visited.

        Returns:
            Translated query and query kwargs.
        """
        if structured_query.filter is None:
            kwargs = {}
        else:
            kwargs = {"expr": structured_query.filter.accept(self)}
        return structured_query.query, kwargs
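Unlike the dict-producing translators, this one emits a filter-expression string; a small illustrative sketch (attribute names are made up):

from langchain_core.structured_query import Comparator, Comparison

translator = TencentVectorDBTranslator()
expr = translator.visit_comparison(
    Comparison(comparator=Comparator.IN, attribute="genre", value=["action", "comedy"])
)
# String members of IN/NIN lists get double-quoted:
# expr == 'genre in ("action", "comedy")'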
lc_public_repos/langchain/libs/community/langchain_community/query_constructors/mongodb_atlas.py
"""Logic for converting internal query language to a valid MongoDB Atlas query.""" from typing import Dict, Tuple, Union from langchain_core.structured_query import ( Comparator, Comparison, Operation, Operator, StructuredQuery, Visitor, ) MULTIPLE_ARITY_COMPARATORS = [Comparator.IN, Comparator.NIN] class MongoDBAtlasTranslator(Visitor): """Translate Mongo internal query language elements to valid filters.""" """Subset of allowed logical comparators.""" allowed_comparators = [ Comparator.EQ, Comparator.NE, Comparator.GT, Comparator.GTE, Comparator.LT, Comparator.LTE, Comparator.IN, Comparator.NIN, ] """Subset of allowed logical operators.""" allowed_operators = [Operator.AND, Operator.OR] ## Convert a operator or a comparator to Mongo Query Format def _format_func(self, func: Union[Operator, Comparator]) -> str: self._validate_func(func) map_dict = { Operator.AND: "$and", Operator.OR: "$or", Comparator.EQ: "$eq", Comparator.NE: "$ne", Comparator.GTE: "$gte", Comparator.LTE: "$lte", Comparator.LT: "$lt", Comparator.GT: "$gt", Comparator.IN: "$in", Comparator.NIN: "$nin", } return map_dict[func] def visit_operation(self, operation: Operation) -> Dict: args = [arg.accept(self) for arg in operation.arguments] return {self._format_func(operation.operator): args} def visit_comparison(self, comparison: Comparison) -> Dict: if comparison.comparator in MULTIPLE_ARITY_COMPARATORS and not isinstance( comparison.value, list ): comparison.value = [comparison.value] comparator = self._format_func(comparison.comparator) attribute = comparison.attribute return {attribute: {comparator: comparison.value}} def visit_structured_query( self, structured_query: StructuredQuery ) -> Tuple[str, dict]: if structured_query.filter is None: kwargs = {} else: kwargs = {"pre_filter": structured_query.filter.accept(self)} return structured_query.query, kwargs
lc_public_repos/langchain/libs/community/langchain_community/query_constructors/pgvector.py
from typing import Dict, Tuple, Union

from langchain_core.structured_query import (
    Comparator,
    Comparison,
    Operation,
    Operator,
    StructuredQuery,
    Visitor,
)


class PGVectorTranslator(Visitor):
    """Translate `PGVector` internal query language elements to valid filters."""

    allowed_operators = [Operator.AND, Operator.OR]
    """Subset of allowed logical operators."""

    allowed_comparators = [
        Comparator.EQ,
        Comparator.NE,
        Comparator.GT,
        Comparator.LT,
        Comparator.IN,
        Comparator.NIN,
        Comparator.CONTAIN,
        Comparator.LIKE,
    ]
    """Subset of allowed logical comparators."""

    def _format_func(self, func: Union[Operator, Comparator]) -> str:
        self._validate_func(func)
        return f"{func.value}"

    def visit_operation(self, operation: Operation) -> Dict:
        args = [arg.accept(self) for arg in operation.arguments]
        return {self._format_func(operation.operator): args}

    def visit_comparison(self, comparison: Comparison) -> Dict:
        return {
            comparison.attribute: {
                self._format_func(comparison.comparator): comparison.value
            }
        }

    def visit_structured_query(
        self, structured_query: StructuredQuery
    ) -> Tuple[str, dict]:
        if structured_query.filter is None:
            kwargs = {}
        else:
            kwargs = {"filter": structured_query.filter.accept(self)}
        return structured_query.query, kwargs
lc_public_repos/langchain/libs/community/langchain_community/query_constructors/deeplake.py
"""Logic for converting internal query language to a valid Chroma query.""" from typing import Tuple, Union from langchain_core.structured_query import ( Comparator, Comparison, Operation, Operator, StructuredQuery, Visitor, ) COMPARATOR_TO_TQL = { Comparator.EQ: "==", Comparator.GT: ">", Comparator.GTE: ">=", Comparator.LT: "<", Comparator.LTE: "<=", } OPERATOR_TO_TQL = { Operator.AND: "and", Operator.OR: "or", Operator.NOT: "NOT", } def can_cast_to_float(string: str) -> bool: """Check if a string can be cast to a float.""" try: float(string) return True except ValueError: return False class DeepLakeTranslator(Visitor): """Translate `DeepLake` internal query language elements to valid filters.""" allowed_operators = [Operator.AND, Operator.OR, Operator.NOT] """Subset of allowed logical operators.""" allowed_comparators = [ Comparator.EQ, Comparator.GT, Comparator.GTE, Comparator.LT, Comparator.LTE, ] """Subset of allowed logical comparators.""" def _format_func(self, func: Union[Operator, Comparator]) -> str: self._validate_func(func) if isinstance(func, Operator): value = OPERATOR_TO_TQL[func.value] # type: ignore elif isinstance(func, Comparator): value = COMPARATOR_TO_TQL[func.value] # type: ignore return f"{value}" def visit_operation(self, operation: Operation) -> str: args = [arg.accept(self) for arg in operation.arguments] operator = self._format_func(operation.operator) return "(" + (" " + operator + " ").join(args) + ")" def visit_comparison(self, comparison: Comparison) -> str: comparator = self._format_func(comparison.comparator) values = comparison.value if isinstance(values, list): tql = [] for value in values: comparison.value = value tql.append(self.visit_comparison(comparison)) return "(" + (" or ").join(tql) + ")" if not can_cast_to_float(comparison.value): values = f"'{values}'" return f"metadata['{comparison.attribute}'] {comparator} {values}" def visit_structured_query( self, structured_query: StructuredQuery ) -> Tuple[str, dict]: if structured_query.filter is None: kwargs = {} else: tqL = f"SELECT * WHERE {structured_query.filter.accept(self)}" kwargs = {"tql": tqL} return structured_query.query, kwargs
lc_public_repos/langchain/libs/community/langchain_community/query_constructors/pinecone.py
from typing import Dict, Tuple, Union

from langchain_core.structured_query import (
    Comparator,
    Comparison,
    Operation,
    Operator,
    StructuredQuery,
    Visitor,
)


class PineconeTranslator(Visitor):
    """Translate `Pinecone` internal query language elements to valid filters."""

    allowed_comparators = (
        Comparator.EQ,
        Comparator.NE,
        Comparator.LT,
        Comparator.LTE,
        Comparator.GT,
        Comparator.GTE,
        Comparator.IN,
        Comparator.NIN,
    )
    """Subset of allowed logical comparators."""

    allowed_operators = (Operator.AND, Operator.OR)
    """Subset of allowed logical operators."""

    def _format_func(self, func: Union[Operator, Comparator]) -> str:
        self._validate_func(func)
        return f"${func.value}"

    def visit_operation(self, operation: Operation) -> Dict:
        args = [arg.accept(self) for arg in operation.arguments]
        return {self._format_func(operation.operator): args}

    def visit_comparison(self, comparison: Comparison) -> Dict:
        if comparison.comparator in (Comparator.IN, Comparator.NIN) and not isinstance(
            comparison.value, list
        ):
            comparison.value = [comparison.value]

        return {
            comparison.attribute: {
                self._format_func(comparison.comparator): comparison.value
            }
        }

    def visit_structured_query(
        self, structured_query: StructuredQuery
    ) -> Tuple[str, dict]:
        if structured_query.filter is None:
            kwargs = {}
        else:
            kwargs = {"filter": structured_query.filter.accept(self)}
        return structured_query.query, kwargs
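A short illustrative sketch of the scalar-to-list coercion this translator applies for $in/$nin (attribute names are made up):

from langchain_core.structured_query import Comparator, Comparison

translator = PineconeTranslator()
flt = translator.visit_comparison(
    Comparison(comparator=Comparator.IN, attribute="genre", value="comedy")
)
# Scalar values are wrapped in a list before translation:
# flt == {"genre": {"$in": ["comedy"]}}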
lc_public_repos/langchain/libs/community/langchain_community/query_constructors/timescalevector.py
from __future__ import annotations

from typing import TYPE_CHECKING, Tuple, Union

from langchain_core.structured_query import (
    Comparator,
    Comparison,
    Operation,
    Operator,
    StructuredQuery,
    Visitor,
)

if TYPE_CHECKING:
    from timescale_vector import client


class TimescaleVectorTranslator(Visitor):
    """Translate the internal query language elements to valid filters."""

    allowed_operators = [Operator.AND, Operator.OR, Operator.NOT]
    """Subset of allowed logical operators."""

    allowed_comparators = [
        Comparator.EQ,
        Comparator.GT,
        Comparator.GTE,
        Comparator.LT,
        Comparator.LTE,
    ]

    COMPARATOR_MAP = {
        Comparator.EQ: "==",
        Comparator.GT: ">",
        Comparator.GTE: ">=",
        Comparator.LT: "<",
        Comparator.LTE: "<=",
    }

    OPERATOR_MAP = {Operator.AND: "AND", Operator.OR: "OR", Operator.NOT: "NOT"}

    def _format_func(self, func: Union[Operator, Comparator]) -> str:
        self._validate_func(func)
        if isinstance(func, Operator):
            value = self.OPERATOR_MAP[func.value]  # type: ignore
        elif isinstance(func, Comparator):
            value = self.COMPARATOR_MAP[func.value]  # type: ignore
        return f"{value}"

    def visit_operation(self, operation: Operation) -> client.Predicates:
        try:
            from timescale_vector import client
        except ImportError as e:
            raise ImportError(
                "Cannot import timescale-vector. Please install with `pip install "
                "timescale-vector`."
            ) from e
        args = [arg.accept(self) for arg in operation.arguments]
        return client.Predicates(*args, operator=self._format_func(operation.operator))

    def visit_comparison(self, comparison: Comparison) -> client.Predicates:
        try:
            from timescale_vector import client
        except ImportError as e:
            raise ImportError(
                "Cannot import timescale-vector. Please install with `pip install "
                "timescale-vector`."
            ) from e
        return client.Predicates(
            (
                comparison.attribute,
                self._format_func(comparison.comparator),
                comparison.value,
            )
        )

    def visit_structured_query(
        self, structured_query: StructuredQuery
    ) -> Tuple[str, dict]:
        if structured_query.filter is None:
            kwargs = {}
        else:
            kwargs = {"predicates": structured_query.filter.accept(self)}
        return structured_query.query, kwargs
lc_public_repos/langchain/libs/community/langchain_community/query_constructors/dashvector.py
"""Logic for converting internal query language to a valid DashVector query.""" from typing import Tuple, Union from langchain_core.structured_query import ( Comparator, Comparison, Operation, Operator, StructuredQuery, Visitor, ) class DashvectorTranslator(Visitor): """Logic for converting internal query language elements to valid filters.""" allowed_operators = [Operator.AND, Operator.OR] allowed_comparators = [ Comparator.EQ, Comparator.GT, Comparator.GTE, Comparator.LT, Comparator.LTE, Comparator.LIKE, ] map_dict = { Operator.AND: " AND ", Operator.OR: " OR ", Comparator.EQ: " = ", Comparator.GT: " > ", Comparator.GTE: " >= ", Comparator.LT: " < ", Comparator.LTE: " <= ", Comparator.LIKE: " LIKE ", } def _format_func(self, func: Union[Operator, Comparator]) -> str: self._validate_func(func) return self.map_dict[func] def visit_operation(self, operation: Operation) -> str: args = [arg.accept(self) for arg in operation.arguments] return self._format_func(operation.operator).join(args) def visit_comparison(self, comparison: Comparison) -> str: value = comparison.value if isinstance(value, str): if comparison.comparator == Comparator.LIKE: value = f"'%{value}%'" else: value = f"'{value}'" return ( f"{comparison.attribute}{self._format_func(comparison.comparator)}{value}" ) def visit_structured_query( self, structured_query: StructuredQuery ) -> Tuple[str, dict]: if structured_query.filter is None: kwargs = {} else: kwargs = {"filter": structured_query.filter.accept(self)} return structured_query.query, kwargs
lc_public_repos/langchain/libs/community/langchain_community/query_constructors/weaviate.py
from datetime import datetime
from typing import Dict, Tuple, Union

from langchain_core.structured_query import (
    Comparator,
    Comparison,
    Operation,
    Operator,
    StructuredQuery,
    Visitor,
)


class WeaviateTranslator(Visitor):
    """Translate `Weaviate` internal query language elements to valid filters."""

    allowed_operators = [Operator.AND, Operator.OR]
    """Subset of allowed logical operators."""

    allowed_comparators = [
        Comparator.EQ,
        Comparator.NE,
        Comparator.GTE,
        Comparator.LTE,
        Comparator.LT,
        Comparator.GT,
    ]

    def _format_func(self, func: Union[Operator, Comparator]) -> str:
        self._validate_func(func)
        # https://weaviate.io/developers/weaviate/api/graphql/filters
        map_dict = {
            Operator.AND: "And",
            Operator.OR: "Or",
            Comparator.EQ: "Equal",
            Comparator.NE: "NotEqual",
            Comparator.GTE: "GreaterThanEqual",
            Comparator.LTE: "LessThanEqual",
            Comparator.LT: "LessThan",
            Comparator.GT: "GreaterThan",
        }
        return map_dict[func]

    def visit_operation(self, operation: Operation) -> Dict:
        args = [arg.accept(self) for arg in operation.arguments]
        return {"operator": self._format_func(operation.operator), "operands": args}

    def visit_comparison(self, comparison: Comparison) -> Dict:
        value_type = "valueText"
        value = comparison.value
        if isinstance(comparison.value, bool):
            value_type = "valueBoolean"
        elif isinstance(comparison.value, float):
            value_type = "valueNumber"
        elif isinstance(comparison.value, int):
            value_type = "valueInt"
        elif (
            isinstance(comparison.value, dict)
            and comparison.value.get("type") == "date"
        ):
            value_type = "valueDate"
            # ISO 8601 timestamp, formatted as RFC3339
            date = datetime.strptime(comparison.value["date"], "%Y-%m-%d")
            value = date.strftime("%Y-%m-%dT%H:%M:%SZ")
        filter = {
            "path": [comparison.attribute],
            "operator": self._format_func(comparison.comparator),
            value_type: value,
        }
        return filter

    def visit_structured_query(
        self, structured_query: StructuredQuery
    ) -> Tuple[str, dict]:
        if structured_query.filter is None:
            kwargs = {}
        else:
            kwargs = {"where_filter": structured_query.filter.accept(self)}
        return structured_query.query, kwargs
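A minimal sketch (illustrative attribute name) showing the typed-value keys Weaviate's GraphQL filters expect:

from langchain_core.structured_query import Comparator, Comparison

translator = WeaviateTranslator()
where_filter = translator.visit_comparison(
    Comparison(comparator=Comparator.EQ, attribute="author", value="Jane Doe")
)
# String values land under "valueText"; bools, floats, ints, and dates get
# their own keys as in visit_comparison above.
# where_filter == {"path": ["author"], "operator": "Equal", "valueText": "Jane Doe"}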
lc_public_repos/langchain/libs/community/langchain_community/query_constructors/neo4j.py
from typing import Dict, Tuple, Union

from langchain_core._api.deprecation import deprecated
from langchain_core.structured_query import (
    Comparator,
    Comparison,
    Operation,
    Operator,
    StructuredQuery,
    Visitor,
)


@deprecated(
    since="0.3.8",
    removal="1.0",
    alternative_import="langchain_neo4j.query_constructors.neo4j.Neo4jTranslator",
)
class Neo4jTranslator(Visitor):
    """Translate `Neo4j` internal query language elements to valid filters."""

    allowed_operators = [Operator.AND, Operator.OR]
    """Subset of allowed logical operators."""

    allowed_comparators = [
        Comparator.EQ,
        Comparator.NE,
        Comparator.GTE,
        Comparator.LTE,
        Comparator.LT,
        Comparator.GT,
    ]

    def _format_func(self, func: Union[Operator, Comparator]) -> str:
        self._validate_func(func)
        map_dict = {
            Operator.AND: "$and",
            Operator.OR: "$or",
            Comparator.EQ: "$eq",
            Comparator.NE: "$ne",
            Comparator.GTE: "$gte",
            Comparator.LTE: "$lte",
            Comparator.LT: "$lt",
            Comparator.GT: "$gt",
        }
        return map_dict[func]

    def visit_operation(self, operation: Operation) -> Dict:
        args = [arg.accept(self) for arg in operation.arguments]
        return {self._format_func(operation.operator): args}

    def visit_comparison(self, comparison: Comparison) -> Dict:
        return {
            comparison.attribute: {
                self._format_func(comparison.comparator): comparison.value
            }
        }

    def visit_structured_query(
        self, structured_query: StructuredQuery
    ) -> Tuple[str, dict]:
        if structured_query.filter is None:
            kwargs = {}
        else:
            kwargs = {"filter": structured_query.filter.accept(self)}
        return structured_query.query, kwargs
lc_public_repos/langchain/libs/community/langchain_community/query_constructors/vectara.py
from typing import Tuple, Union

from langchain_core.structured_query import (
    Comparator,
    Comparison,
    Operation,
    Operator,
    StructuredQuery,
    Visitor,
)


def process_value(value: Union[int, float, str]) -> str:
    """Convert a value to a string and add single quotes if it is a string."""
    if isinstance(value, str):
        return f"'{value}'"
    else:
        return str(value)


class VectaraTranslator(Visitor):
    """Translate `Vectara` internal query language elements to valid filters."""

    allowed_operators = [Operator.AND, Operator.OR]
    """Subset of allowed logical operators."""

    allowed_comparators = [
        Comparator.EQ,
        Comparator.NE,
        Comparator.GT,
        Comparator.GTE,
        Comparator.LT,
        Comparator.LTE,
    ]
    """Subset of allowed logical comparators."""

    def _format_func(self, func: Union[Operator, Comparator]) -> str:
        map_dict = {
            Operator.AND: " and ",
            Operator.OR: " or ",
            Comparator.EQ: "=",
            Comparator.NE: "!=",
            Comparator.GT: ">",
            Comparator.GTE: ">=",
            Comparator.LT: "<",
            Comparator.LTE: "<=",
        }
        self._validate_func(func)
        return map_dict[func]

    def visit_operation(self, operation: Operation) -> str:
        args = [arg.accept(self) for arg in operation.arguments]
        operator = self._format_func(operation.operator)
        return "( " + operator.join(args) + " )"

    def visit_comparison(self, comparison: Comparison) -> str:
        comparator = self._format_func(comparison.comparator)
        processed_value = process_value(comparison.value)
        attribute = comparison.attribute
        return (
            "( " + "doc." + attribute + " " + comparator + " " + processed_value + " )"
        )

    def visit_structured_query(
        self, structured_query: StructuredQuery
    ) -> Tuple[str, dict]:
        if structured_query.filter is None:
            kwargs = {}
        else:
            kwargs = {"filter": structured_query.filter.accept(self)}
        return structured_query.query, kwargs
lc_public_repos/langchain/libs/community/langchain_community/query_constructors/astradb.py
"""Logic for converting internal query language to a valid AstraDB query.""" from typing import Dict, Tuple, Union from langchain_core.structured_query import ( Comparator, Comparison, Operation, Operator, StructuredQuery, Visitor, ) MULTIPLE_ARITY_COMPARATORS = [Comparator.IN, Comparator.NIN] class AstraDBTranslator(Visitor): """Translate AstraDB internal query language elements to valid filters.""" """Subset of allowed logical comparators.""" allowed_comparators = [ Comparator.EQ, Comparator.NE, Comparator.GT, Comparator.GTE, Comparator.LT, Comparator.LTE, Comparator.IN, Comparator.NIN, ] """Subset of allowed logical operators.""" allowed_operators = [Operator.AND, Operator.OR] def _format_func(self, func: Union[Operator, Comparator]) -> str: self._validate_func(func) map_dict = { Operator.AND: "$and", Operator.OR: "$or", Comparator.EQ: "$eq", Comparator.NE: "$ne", Comparator.GTE: "$gte", Comparator.LTE: "$lte", Comparator.LT: "$lt", Comparator.GT: "$gt", Comparator.IN: "$in", Comparator.NIN: "$nin", } return map_dict[func] def visit_operation(self, operation: Operation) -> Dict: args = [arg.accept(self) for arg in operation.arguments] return {self._format_func(operation.operator): args} def visit_comparison(self, comparison: Comparison) -> Dict: if comparison.comparator in MULTIPLE_ARITY_COMPARATORS and not isinstance( comparison.value, list ): comparison.value = [comparison.value] comparator = self._format_func(comparison.comparator) return {comparison.attribute: {comparator: comparison.value}} def visit_structured_query( self, structured_query: StructuredQuery ) -> Tuple[str, dict]: if structured_query.filter is None: kwargs = {} else: kwargs = {"filter": structured_query.filter.accept(self)} return structured_query.query, kwargs
lc_public_repos/langchain/libs/community/langchain_community/query_constructors/supabase.py
from typing import Any, Dict, Tuple

from langchain_core.structured_query import (
    Comparator,
    Comparison,
    Operation,
    Operator,
    StructuredQuery,
    Visitor,
)


class SupabaseVectorTranslator(Visitor):
    """Translate Langchain filters to Supabase PostgREST filters."""

    allowed_operators = [Operator.AND, Operator.OR]
    """Subset of allowed logical operators."""

    allowed_comparators = [
        Comparator.EQ,
        Comparator.NE,
        Comparator.GT,
        Comparator.GTE,
        Comparator.LT,
        Comparator.LTE,
        Comparator.LIKE,
    ]
    """Subset of allowed logical comparators."""

    metadata_column: str = "metadata"

    def _map_comparator(self, comparator: Comparator) -> str:
        """
        Maps Langchain comparator to PostgREST comparator:
        https://postgrest.org/en/stable/references/api/tables_views.html#operators
        """
        postgrest_comparator = {
            Comparator.EQ: "eq",
            Comparator.NE: "neq",
            Comparator.GT: "gt",
            Comparator.GTE: "gte",
            Comparator.LT: "lt",
            Comparator.LTE: "lte",
            Comparator.LIKE: "like",
        }.get(comparator)

        if postgrest_comparator is None:
            raise Exception(
                f"Comparator '{comparator}' is not currently "
                "supported in Supabase Vector"
            )

        return postgrest_comparator

    def _get_json_operator(self, value: Any) -> str:
        if isinstance(value, str):
            return "->>"
        else:
            return "->"

    def visit_operation(self, operation: Operation) -> str:
        args = [arg.accept(self) for arg in operation.arguments]
        return f"{operation.operator.value}({','.join(args)})"

    def visit_comparison(self, comparison: Comparison) -> str:
        if isinstance(comparison.value, list):
            return self.visit_operation(
                Operation(
                    operator=Operator.AND,
                    arguments=[
                        Comparison(
                            comparator=comparison.comparator,
                            attribute=comparison.attribute,
                            value=value,
                        )
                        for value in comparison.value
                    ],
                )
            )

        return ".".join(
            [
                f"{self.metadata_column}{self._get_json_operator(comparison.value)}{comparison.attribute}",
                f"{self._map_comparator(comparison.comparator)}",
                f"{comparison.value}",
            ]
        )

    def visit_structured_query(
        self, structured_query: StructuredQuery
    ) -> Tuple[str, Dict[str, str]]:
        if structured_query.filter is None:
            kwargs = {}
        else:
            kwargs = {"postgrest_filter": structured_query.filter.accept(self)}
        return structured_query.query, kwargs
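A small illustrative sketch of the PostgREST filter string produced (attribute names are made up):

from langchain_core.structured_query import Comparator, Comparison

translator = SupabaseVectorTranslator()
pf = translator.visit_comparison(
    Comparison(comparator=Comparator.EQ, attribute="color", value="red")
)
# String values route through the ->> JSON text operator:
# pf == "metadata->>color.eq.red"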
lc_public_repos/langchain/libs/community/langchain_community/query_constructors/qdrant.py
from __future__ import annotations

from typing import TYPE_CHECKING, Tuple

from langchain_core.structured_query import (
    Comparator,
    Comparison,
    Operation,
    Operator,
    StructuredQuery,
    Visitor,
)

if TYPE_CHECKING:
    from qdrant_client.http import models as rest


class QdrantTranslator(Visitor):
    """Translate `Qdrant` internal query language elements to valid filters."""

    allowed_operators = (
        Operator.AND,
        Operator.OR,
        Operator.NOT,
    )
    """Subset of allowed logical operators."""

    allowed_comparators = (
        Comparator.EQ,
        Comparator.LT,
        Comparator.LTE,
        Comparator.GT,
        Comparator.GTE,
        Comparator.LIKE,
    )
    """Subset of allowed logical comparators."""

    def __init__(self, metadata_key: str):
        self.metadata_key = metadata_key

    def visit_operation(self, operation: Operation) -> rest.Filter:
        try:
            from qdrant_client.http import models as rest
        except ImportError as e:
            raise ImportError(
                "Cannot import qdrant_client. Please install with `pip install "
                "qdrant-client`."
            ) from e

        args = [arg.accept(self) for arg in operation.arguments]
        operator = {
            Operator.AND: "must",
            Operator.OR: "should",
            Operator.NOT: "must_not",
        }[operation.operator]
        return rest.Filter(**{operator: args})

    def visit_comparison(self, comparison: Comparison) -> rest.FieldCondition:
        try:
            from qdrant_client.http import models as rest
        except ImportError as e:
            raise ImportError(
                "Cannot import qdrant_client. Please install with `pip install "
                "qdrant-client`."
            ) from e

        self._validate_func(comparison.comparator)
        attribute = self.metadata_key + "." + comparison.attribute
        if comparison.comparator == Comparator.EQ:
            return rest.FieldCondition(
                key=attribute, match=rest.MatchValue(value=comparison.value)
            )
        if comparison.comparator == Comparator.LIKE:
            return rest.FieldCondition(
                key=attribute, match=rest.MatchText(text=comparison.value)
            )
        kwargs = {comparison.comparator.value: comparison.value}
        return rest.FieldCondition(key=attribute, range=rest.Range(**kwargs))

    def visit_structured_query(
        self, structured_query: StructuredQuery
    ) -> Tuple[str, dict]:
        try:
            from qdrant_client.http import models as rest
        except ImportError as e:
            raise ImportError(
                "Cannot import qdrant_client. Please install with `pip install "
                "qdrant-client`."
            ) from e

        if structured_query.filter is None:
            kwargs = {}
        else:
            filter = structured_query.filter.accept(self)
            if isinstance(filter, rest.FieldCondition):
                filter = rest.Filter(must=[filter])
            kwargs = {"filter": filter}
        return structured_query.query, kwargs
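A minimal sketch, assuming the optional qdrant-client dependency is installed (attribute names are illustrative):

from langchain_core.structured_query import Comparator, Comparison

translator = QdrantTranslator(metadata_key="metadata")
condition = translator.visit_comparison(
    Comparison(comparator=Comparator.EQ, attribute="color", value="red")
)
# condition == rest.FieldCondition(key="metadata.color",
#                                  match=rest.MatchValue(value="red"))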
lc_public_repos/langchain/libs/community/langchain_community/query_constructors/myscale.py
import re
from typing import Any, Callable, Dict, Tuple

from langchain_core.structured_query import (
    Comparator,
    Comparison,
    Operation,
    Operator,
    StructuredQuery,
    Visitor,
)


def _DEFAULT_COMPOSER(op_name: str) -> Callable:
    """
    Default composer for logical operators.

    Args:
        op_name: Name of the operator.

    Returns:
        Callable that takes a list of arguments and returns a string.
    """

    def f(*args: Any) -> str:
        args_: map[str] = map(str, args)
        return f" {op_name} ".join(args_)

    return f


def _FUNCTION_COMPOSER(op_name: str) -> Callable:
    """
    Composer for functions.

    Args:
        op_name: Name of the function.

    Returns:
        Callable that takes a list of arguments and returns a string.
    """

    def f(*args: Any) -> str:
        args_: map[str] = map(str, args)
        return f"{op_name}({','.join(args_)})"

    return f


class MyScaleTranslator(Visitor):
    """Translate `MyScale` internal query language elements to valid filters."""

    allowed_operators = [Operator.AND, Operator.OR, Operator.NOT]
    """Subset of allowed logical operators."""

    allowed_comparators = [
        Comparator.EQ,
        Comparator.GT,
        Comparator.GTE,
        Comparator.LT,
        Comparator.LTE,
        Comparator.CONTAIN,
        Comparator.LIKE,
    ]

    map_dict = {
        Operator.AND: _DEFAULT_COMPOSER("AND"),
        Operator.OR: _DEFAULT_COMPOSER("OR"),
        Operator.NOT: _DEFAULT_COMPOSER("NOT"),
        Comparator.EQ: _DEFAULT_COMPOSER("="),
        Comparator.GT: _DEFAULT_COMPOSER(">"),
        Comparator.GTE: _DEFAULT_COMPOSER(">="),
        Comparator.LT: _DEFAULT_COMPOSER("<"),
        Comparator.LTE: _DEFAULT_COMPOSER("<="),
        Comparator.CONTAIN: _FUNCTION_COMPOSER("has"),
        Comparator.LIKE: _DEFAULT_COMPOSER("ILIKE"),
    }

    def __init__(self, metadata_key: str = "metadata") -> None:
        super().__init__()
        self.metadata_key = metadata_key

    def visit_operation(self, operation: Operation) -> Dict:
        args = [arg.accept(self) for arg in operation.arguments]
        func = operation.operator
        self._validate_func(func)
        return self.map_dict[func](*args)

    def visit_comparison(self, comparison: Comparison) -> Dict:
        regex = r"\((.*?)\)"
        matched = re.search(r"\(\w+\)", comparison.attribute)

        # If arbitrary function is applied to an attribute
        if matched:
            attr = re.sub(
                regex,
                f"({self.metadata_key}.{matched.group(0)[1:-1]})",
                comparison.attribute,
            )
        else:
            attr = f"{self.metadata_key}.{comparison.attribute}"
        value = comparison.value
        comp = comparison.comparator

        value = f"'{value}'" if isinstance(value, str) else value

        # convert timestamp for datetime objects
        if isinstance(value, dict) and value.get("type") == "date":
            attr = f"parseDateTime32BestEffort({attr})"
            value = f"parseDateTime32BestEffort('{value['date']}')"

        # string pattern match
        if comp is Comparator.LIKE:
            value = f"'%{value[1:-1]}%'"
        return self.map_dict[comp](attr, value)

    def visit_structured_query(
        self, structured_query: StructuredQuery
    ) -> Tuple[str, dict]:
        print(structured_query)  # noqa: T201
        if structured_query.filter is None:
            kwargs = {}
        else:
            kwargs = {"where_str": structured_query.filter.accept(self)}
        return structured_query.query, kwargs
lc_public_repos/langchain/libs/community/langchain_community/query_constructors/milvus.py
"""Logic for converting internal query language to a valid Milvus query.""" from typing import Tuple, Union from langchain_core.structured_query import ( Comparator, Comparison, Operation, Operator, StructuredQuery, Visitor, ) COMPARATOR_TO_BER = { Comparator.EQ: "==", Comparator.GT: ">", Comparator.GTE: ">=", Comparator.LT: "<", Comparator.LTE: "<=", Comparator.IN: "in", Comparator.LIKE: "like", } UNARY_OPERATORS = [Operator.NOT] def process_value(value: Union[int, float, str], comparator: Comparator) -> str: """Convert a value to a string and add double quotes if it is a string. It required for comparators involving strings. Args: value: The value to convert. comparator: The comparator. Returns: The converted value as a string. """ # if isinstance(value, str): if comparator is Comparator.LIKE: # If the comparator is LIKE, add a percent sign after it for prefix matching # and add double quotes return f'"{value}%"' else: # If the value is already a string, add double quotes return f'"{value}"' else: # If the value is not a string, convert it to a string without double quotes return str(value) class MilvusTranslator(Visitor): """Translate Milvus internal query language elements to valid filters.""" """Subset of allowed logical operators.""" allowed_operators = [Operator.AND, Operator.NOT, Operator.OR] """Subset of allowed logical comparators.""" allowed_comparators = [ Comparator.EQ, Comparator.GT, Comparator.GTE, Comparator.LT, Comparator.LTE, Comparator.IN, Comparator.LIKE, ] def _format_func(self, func: Union[Operator, Comparator]) -> str: self._validate_func(func) value = func.value if isinstance(func, Comparator): value = COMPARATOR_TO_BER[func] return f"{value}" def visit_operation(self, operation: Operation) -> str: if operation.operator in UNARY_OPERATORS and len(operation.arguments) == 1: operator = self._format_func(operation.operator) return operator + "(" + operation.arguments[0].accept(self) + ")" elif operation.operator in UNARY_OPERATORS: raise ValueError( f'"{operation.operator.value}" can have only one argument in Milvus' ) else: args = [arg.accept(self) for arg in operation.arguments] operator = self._format_func(operation.operator) return "(" + (" " + operator + " ").join(args) + ")" def visit_comparison(self, comparison: Comparison) -> str: comparator = self._format_func(comparison.comparator) processed_value = process_value(comparison.value, comparison.comparator) attribute = comparison.attribute return "( " + attribute + " " + comparator + " " + processed_value + " )" def visit_structured_query( self, structured_query: StructuredQuery ) -> Tuple[str, dict]: if structured_query.filter is None: kwargs = {} else: kwargs = {"expr": structured_query.filter.accept(self)} return structured_query.query, kwargs
lc_public_repos/langchain/libs/community/langchain_community/query_constructors/elasticsearch.py
from typing import Dict, Tuple, Union

from langchain_core.structured_query import (
    Comparator,
    Comparison,
    Operation,
    Operator,
    StructuredQuery,
    Visitor,
)


class ElasticsearchTranslator(Visitor):
    """Translate `Elasticsearch` internal query language elements to valid filters."""

    allowed_comparators = [
        Comparator.EQ,
        Comparator.GT,
        Comparator.GTE,
        Comparator.LT,
        Comparator.LTE,
        Comparator.CONTAIN,
        Comparator.LIKE,
    ]
    """Subset of allowed logical comparators."""

    allowed_operators = [Operator.AND, Operator.OR, Operator.NOT]
    """Subset of allowed logical operators."""

    def _format_func(self, func: Union[Operator, Comparator]) -> str:
        self._validate_func(func)
        map_dict = {
            Operator.OR: "should",
            Operator.NOT: "must_not",
            Operator.AND: "must",
            Comparator.EQ: "term",
            Comparator.GT: "gt",
            Comparator.GTE: "gte",
            Comparator.LT: "lt",
            Comparator.LTE: "lte",
            Comparator.CONTAIN: "match",
            Comparator.LIKE: "match",
        }
        return map_dict[func]

    def visit_operation(self, operation: Operation) -> Dict:
        args = [arg.accept(self) for arg in operation.arguments]
        return {"bool": {self._format_func(operation.operator): args}}

    def visit_comparison(self, comparison: Comparison) -> Dict:
        # ElasticsearchStore filters need to target the metadata object field
        field = f"metadata.{comparison.attribute}"

        is_range_comparator = comparison.comparator in [
            Comparator.GT,
            Comparator.GTE,
            Comparator.LT,
            Comparator.LTE,
        ]

        if is_range_comparator:
            value = comparison.value
            if isinstance(comparison.value, dict) and "date" in comparison.value:
                value = comparison.value["date"]
            return {"range": {field: {self._format_func(comparison.comparator): value}}}

        if comparison.comparator == Comparator.CONTAIN:
            return {
                self._format_func(comparison.comparator): {
                    field: {"query": comparison.value}
                }
            }

        if comparison.comparator == Comparator.LIKE:
            return {
                self._format_func(comparison.comparator): {
                    field: {"query": comparison.value, "fuzziness": "AUTO"}
                }
            }

        # we assume that if the value is a string,
        # we want to use the keyword field
        field = f"{field}.keyword" if isinstance(comparison.value, str) else field

        if isinstance(comparison.value, dict):
            if "date" in comparison.value:
                comparison.value = comparison.value["date"]

        return {self._format_func(comparison.comparator): {field: comparison.value}}

    def visit_structured_query(
        self, structured_query: StructuredQuery
    ) -> Tuple[str, dict]:
        if structured_query.filter is None:
            kwargs = {}
        else:
            kwargs = {"filter": [structured_query.filter.accept(self)]}
        return structured_query.query, kwargs
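A minimal sketch (illustrative attribute name) of the Elasticsearch query-DSL fragment produced for a string equality:

from langchain_core.structured_query import Comparator, Comparison

translator = ElasticsearchTranslator()
flt = translator.visit_comparison(
    Comparison(comparator=Comparator.EQ, attribute="genre", value="drama")
)
# String equality targets the .keyword sub-field for exact term matching:
# flt == {"term": {"metadata.genre.keyword": "drama"}}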
lc_public_repos/langchain/libs/community/langchain_community/query_constructors/dingo.py
from typing import Tuple, Union

from langchain_core.structured_query import (
    Comparator,
    Comparison,
    Operation,
    Operator,
    StructuredQuery,
    Visitor,
)


class DingoDBTranslator(Visitor):
    """Translate `DingoDB` internal query language elements to valid filters."""

    allowed_comparators = (
        Comparator.EQ,
        Comparator.NE,
        Comparator.LT,
        Comparator.LTE,
        Comparator.GT,
        Comparator.GTE,
    )
    """Subset of allowed logical comparators."""

    allowed_operators = (Operator.AND, Operator.OR)
    """Subset of allowed logical operators."""

    def _format_func(self, func: Union[Operator, Comparator]) -> str:
        self._validate_func(func)
        return f"${func.value}"

    def visit_operation(self, operation: Operation) -> Operation:
        return operation

    def visit_comparison(self, comparison: Comparison) -> Comparison:
        return comparison

    def visit_structured_query(
        self, structured_query: StructuredQuery
    ) -> Tuple[str, dict]:
        if structured_query.filter is None:
            kwargs = {}
        else:
            kwargs = {
                "search_params": {
                    "langchain_expr": structured_query.filter.accept(self)
                }
            }
        return structured_query.query, kwargs
lc_public_repos/langchain/libs/community/langchain_community/graphs/rdf_graph.py
from __future__ import annotations

from typing import (
    TYPE_CHECKING,
    Dict,
    List,
    Optional,
)

if TYPE_CHECKING:
    import rdflib

prefixes = {
    "owl": """PREFIX owl: <http://www.w3.org/2002/07/owl#>\n""",
    "rdf": """PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>\n""",
    "rdfs": """PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>\n""",
    "xsd": """PREFIX xsd: <http://www.w3.org/2001/XMLSchema#>\n""",
}

cls_query_rdf = prefixes["rdfs"] + (
    """SELECT DISTINCT ?cls ?com\n"""
    """WHERE { \n"""
    """    ?instance a ?cls . \n"""
    """    OPTIONAL { ?cls rdfs:comment ?com } \n"""
    """}"""
)

cls_query_rdfs = prefixes["rdfs"] + (
    """SELECT DISTINCT ?cls ?com\n"""
    """WHERE { \n"""
    """    ?instance a/rdfs:subClassOf* ?cls . \n"""
    """    OPTIONAL { ?cls rdfs:comment ?com } \n"""
    """}"""
)

cls_query_owl = prefixes["rdfs"] + (
    """SELECT DISTINCT ?cls ?com\n"""
    """WHERE { \n"""
    """    ?instance a/rdfs:subClassOf* ?cls . \n"""
    """    FILTER (isIRI(?cls)) . \n"""
    """    OPTIONAL { ?cls rdfs:comment ?com } \n"""
    """}"""
)

rel_query_rdf = prefixes["rdfs"] + (
    """SELECT DISTINCT ?rel ?com\n"""
    """WHERE { \n"""
    """    ?subj ?rel ?obj . \n"""
    """    OPTIONAL { ?rel rdfs:comment ?com } \n"""
    """}"""
)

rel_query_rdfs = (
    prefixes["rdf"]
    + prefixes["rdfs"]
    + (
        """SELECT DISTINCT ?rel ?com\n"""
        """WHERE { \n"""
        """    ?rel a/rdfs:subPropertyOf* rdf:Property . \n"""
        """    OPTIONAL { ?rel rdfs:comment ?com } \n"""
        """}"""
    )
)

op_query_owl = (
    prefixes["rdfs"]
    + prefixes["owl"]
    + (
        """SELECT DISTINCT ?op ?com\n"""
        """WHERE { \n"""
        """    ?op a/rdfs:subPropertyOf* owl:ObjectProperty . \n"""
        """    OPTIONAL { ?op rdfs:comment ?com } \n"""
        """}"""
    )
)

dp_query_owl = (
    prefixes["rdfs"]
    + prefixes["owl"]
    + (
        """SELECT DISTINCT ?dp ?com\n"""
        """WHERE { \n"""
        """    ?dp a/rdfs:subPropertyOf* owl:DatatypeProperty . \n"""
        """    OPTIONAL { ?dp rdfs:comment ?com } \n"""
        """}"""
    )
)


class RdfGraph:
    """RDFlib wrapper for graph operations.

    Modes:
    * local: Local file - can be queried and changed
    * online: Online file - can only be queried, changes can be stored locally
    * store: Triple store - can be queried and changed if update_endpoint available

    Together with a source file, the serialization should be specified.

    *Security note*: Make sure that the database connection uses credentials
        that are narrowly-scoped to only include necessary permissions.
        Failure to do so may result in data corruption or loss, since the calling
        code may attempt commands that would result in deletion, mutation
        of data if appropriately prompted or reading sensitive data if such
        data is present in the database.
        The best way to guard against such negative outcomes is to (as appropriate)
        limit the permissions granted to the credentials used with this tool.

        See https://python.langchain.com/docs/security for more information.
    """

    def __init__(
        self,
        source_file: Optional[str] = None,
        serialization: Optional[str] = "ttl",
        query_endpoint: Optional[str] = None,
        update_endpoint: Optional[str] = None,
        standard: Optional[str] = "rdf",
        local_copy: Optional[str] = None,
        graph_kwargs: Optional[Dict] = None,
        store_kwargs: Optional[Dict] = None,
    ) -> None:
        """
        Set up the RDFlib graph

        :param source_file: either a path for a local file or a URL
        :param serialization: serialization of the input
        :param query_endpoint: SPARQL endpoint for queries, read access
        :param update_endpoint: SPARQL endpoint for UPDATE queries, write access
        :param standard: RDF, RDFS, or OWL
        :param local_copy: new local copy for storing changes
        :param graph_kwargs: Additional rdflib.Graph specific kwargs
            that will be used to initialize it, if query_endpoint is provided.
        :param store_kwargs: Additional sparqlstore.SPARQLStore specific kwargs
            that will be used to initialize it, if query_endpoint is provided.
        """
        self.source_file = source_file
        self.serialization = serialization
        self.query_endpoint = query_endpoint
        self.update_endpoint = update_endpoint
        self.standard = standard
        self.local_copy = local_copy

        try:
            import rdflib
            from rdflib.plugins.stores import sparqlstore
        except ImportError:
            raise ImportError(
                "Could not import rdflib python package. "
                "Please install it with `pip install rdflib`."
            )

        if self.standard not in (supported_standards := ("rdf", "rdfs", "owl")):
            raise ValueError(
                f"Invalid standard. Supported standards are: {supported_standards}."
            )

        if (
            not source_file
            and not query_endpoint
            or source_file
            and (query_endpoint or update_endpoint)
        ):
            raise ValueError(
                "Could not unambiguously initialize the graph wrapper. "
                "Specify either a file (local or online) via the source_file "
                "or a triple store via the endpoints."
            )

        if source_file:
            if source_file.startswith("http"):
                self.mode = "online"
            else:
                self.mode = "local"
                if self.local_copy is None:
                    self.local_copy = self.source_file
            self.graph = rdflib.Graph()
            self.graph.parse(source_file, format=self.serialization)

        if query_endpoint:
            store_kwargs = store_kwargs or {}
            self.mode = "store"
            if not update_endpoint:
                self._store = sparqlstore.SPARQLStore(**store_kwargs)
                self._store.open(query_endpoint)
            else:
                self._store = sparqlstore.SPARQLUpdateStore(**store_kwargs)
                self._store.open((query_endpoint, update_endpoint))
            graph_kwargs = graph_kwargs or {}
            self.graph = rdflib.Graph(self._store, **graph_kwargs)

        # Verify that the graph was loaded
        if not len(self.graph):
            raise AssertionError("The graph is empty.")

        # Set schema
        self.schema = ""
        self.load_schema()

    @property
    def get_schema(self) -> str:
        """
        Returns the schema of the graph database.
        """
        return self.schema

    def query(
        self,
        query: str,
    ) -> List[rdflib.query.ResultRow]:
        """
        Query the graph.
        """
        from rdflib.exceptions import ParserError
        from rdflib.query import ResultRow

        try:
            res = self.graph.query(query)
        except ParserError as e:
            raise ValueError("Generated SPARQL statement is invalid\n" f"{e}")
        return [r for r in res if isinstance(r, ResultRow)]

    def update(
        self,
        query: str,
    ) -> None:
        """
        Update the graph.
        """
        from rdflib.exceptions import ParserError

        try:
            self.graph.update(query)
        except ParserError as e:
            raise ValueError("Generated SPARQL statement is invalid\n" f"{e}")
        if self.local_copy:
            self.graph.serialize(
                destination=self.local_copy, format=self.local_copy.split(".")[-1]
            )
        else:
            raise ValueError("No target file specified for saving the updated file.")

    @staticmethod
    def _get_local_name(iri: str) -> str:
        if "#" in iri:
            local_name = iri.split("#")[-1]
        elif "/" in iri:
            local_name = iri.split("/")[-1]
        else:
            raise ValueError(f"Unexpected IRI '{iri}', contains neither '#' nor '/'.")
        return local_name

    def _res_to_str(self, res: rdflib.query.ResultRow, var: str) -> str:
        return (
            "<"
            + str(res[var])
            + "> ("
            + self._get_local_name(res[var])
            + ", "
            + str(res["com"])
            + ")"
        )

    def load_schema(self) -> None:
        """
        Load the graph schema information.
        """

        def _rdf_s_schema(
            classes: List[rdflib.query.ResultRow],
            relationships: List[rdflib.query.ResultRow],
        ) -> str:
            return (
                f"In the following, each IRI is followed by the local name and "
                f"optionally its description in parentheses. \n"
                f"The RDF graph supports the following node types:\n"
                f'{", ".join([self._res_to_str(r, "cls") for r in classes])}\n'
                f"The RDF graph supports the following relationships:\n"
                f'{", ".join([self._res_to_str(r, "rel") for r in relationships])}\n'
            )

        if self.standard == "rdf":
            clss = self.query(cls_query_rdf)
            rels = self.query(rel_query_rdf)
            self.schema = _rdf_s_schema(clss, rels)
        elif self.standard == "rdfs":
            clss = self.query(cls_query_rdfs)
            rels = self.query(rel_query_rdfs)
            self.schema = _rdf_s_schema(clss, rels)
        elif self.standard == "owl":
            clss = self.query(cls_query_owl)
            ops = self.query(op_query_owl)
            dps = self.query(dp_query_owl)
            self.schema = (
                f"In the following, each IRI is followed by the local name and "
                f"optionally its description in parentheses. \n"
                f"The OWL graph supports the following node types:\n"
                f'{", ".join([self._res_to_str(r, "cls") for r in clss])}\n'
                f"The OWL graph supports the following object properties, "
                f"i.e., relationships between objects:\n"
                f'{", ".join([self._res_to_str(r, "op") for r in ops])}\n'
                f"The OWL graph supports the following data properties, "
                f"i.e., relationships between objects and literals:\n"
                f'{", ".join([self._res_to_str(r, "dp") for r in dps])}\n'
            )
        else:
            raise ValueError(f"Mode '{self.standard}' is currently not supported.")
lc_public_repos/langchain/libs/community/langchain_community/graphs/neptune_rdf_graph.py
import json
from types import SimpleNamespace
from typing import Any, Dict, Optional, Sequence

import requests

# Query to find OWL datatype properties
DTPROP_QUERY = """
SELECT DISTINCT ?elem
WHERE {
    ?elem a owl:DatatypeProperty .
}
"""

# Query to find OWL object properties
OPROP_QUERY = """
SELECT DISTINCT ?elem
WHERE {
    ?elem a owl:ObjectProperty .
}
"""

ELEM_TYPES = {
    "classes": None,
    "rels": None,
    "dtprops": DTPROP_QUERY,
    "oprops": OPROP_QUERY,
}


class NeptuneRdfGraph:
    """Neptune wrapper for RDF graph operations.

    Args:
        host: endpoint for the database instance
        port: port number for the database instance, default is 8182
        use_iam_auth: boolean indicating IAM auth is enabled in Neptune cluster
        use_https: whether to use secure connection, default is True
        client: optional boto3 Neptune client
        credentials_profile_name: optional AWS profile name
        region_name: optional AWS region, e.g., us-west-2
        service: optional service name, default is neptunedata
        sign: optional, whether to sign the request payload, default is True

    Example:
        .. code-block:: python

            graph = NeptuneRdfGraph(
                host='<SPARQL host>',
                port=<SPARQL port>
            )
            schema = graph.get_schema

            OR

            graph = NeptuneRdfGraph(
                host='<SPARQL host>',
                port=<SPARQL port>
            )
            schema_elem = graph.get_schema_elements
            # ... change schema_elements ...
            graph.load_schema(schema_elem)

    *Security note*: Make sure that the database connection uses credentials
        that are narrowly-scoped to only include necessary permissions.
        Failure to do so may result in data corruption or loss, since the calling
        code may attempt commands that would result in deletion, mutation
        of data if appropriately prompted or reading sensitive data if such
        data is present in the database.
        The best way to guard against such negative outcomes is to (as appropriate)
        limit the permissions granted to the credentials used with this tool.

        See https://python.langchain.com/docs/security for more information.
    """

    def __init__(
        self,
        host: str,
        port: int = 8182,
        use_https: bool = True,
        use_iam_auth: bool = False,
        client: Any = None,
        credentials_profile_name: Optional[str] = None,
        region_name: Optional[str] = None,
        service: str = "neptunedata",
        sign: bool = True,
    ) -> None:
        self.use_iam_auth = use_iam_auth
        self.region_name = region_name
        self.query_endpoint = f"https://{host}:{port}/sparql"

        try:
            if client is not None:
                self.client = client
            else:
                import boto3

                if credentials_profile_name is not None:
                    self.session = boto3.Session(profile_name=credentials_profile_name)
                else:
                    # use default credentials
                    self.session = boto3.Session()

                client_params = {}
                if region_name:
                    client_params["region_name"] = region_name
                protocol = "https" if use_https else "http"
                client_params["endpoint_url"] = f"{protocol}://{host}:{port}"

                if sign:
                    self.client = self.session.client(service, **client_params)
                else:
                    from botocore import UNSIGNED
                    from botocore.config import Config

                    self.client = self.session.client(
                        service,
                        **client_params,
                        config=Config(signature_version=UNSIGNED),
                    )

        except ImportError:
            raise ImportError(
                "Could not import boto3 python package. "
                "Please install it with `pip install boto3`."
            )
        except Exception as e:
            if type(e).__name__ == "UnknownServiceError":
                raise ImportError(
                    "NeptuneGraph requires a boto3 version 1.28.38 or greater. "
                    "Please install it with `pip install -U boto3`."
                ) from e
            else:
                raise ValueError(
                    "Could not load credentials to authenticate with AWS client. "
                    "Please check that credentials in the specified "
                    "profile name are valid."
                ) from e

        # Set schema
        self.schema = ""
        self.schema_elements: Dict[str, Any] = {}
        self._refresh_schema()

    @property
    def get_schema(self) -> str:
        """
        Returns the schema of the graph database.
        """
        return self.schema

    @property
    def get_schema_elements(self) -> Dict[str, Any]:
        return self.schema_elements

    def get_summary(self) -> Dict[str, Any]:
        """
        Obtain Neptune statistical summary of classes and predicates in the graph.
        """
        return self.client.get_rdf_graph_summary(mode="detailed")

    def query(
        self,
        query: str,
    ) -> Dict[str, Any]:
        """
        Run Neptune query.
        """
        request_data = {"query": query}
        data = request_data
        request_hdr = None

        if self.use_iam_auth:
            credentials = self.session.get_credentials()
            credentials = credentials.get_frozen_credentials()
            access_key = credentials.access_key
            secret_key = credentials.secret_key
            service = "neptune-db"
            session_token = credentials.token
            params = None
            creds = SimpleNamespace(
                access_key=access_key,
                secret_key=secret_key,
                token=session_token,
                region=self.region_name,
            )
            from botocore.awsrequest import AWSRequest

            request = AWSRequest(
                method="POST", url=self.query_endpoint, data=data, params=params
            )
            from botocore.auth import SigV4Auth

            SigV4Auth(creds, service, self.region_name).add_auth(request)
            request.headers["Content-Type"] = "application/x-www-form-urlencoded"
            request_hdr = request.headers
        else:
            request_hdr = {}
            request_hdr["Content-Type"] = "application/x-www-form-urlencoded"

        queryres = requests.request(
            method="POST", url=self.query_endpoint, headers=request_hdr, data=data
        )
        json_resp = json.loads(queryres.text)
        return json_resp

    def load_schema(self, schema_elements: Dict[str, Any]) -> None:
        """
        Generates and sets schema from schema_elements. Helpful in
        cases where introspected schema needs pruning.
        """
        elem_str = {}
        for elem in ELEM_TYPES:
            res_list = []
            for elem_rec in schema_elements[elem]:
                uri = elem_rec["uri"]
                local = elem_rec["local"]
                res_str = f"<{uri}> ({local})"
                res_list.append(res_str)
            elem_str[elem] = ", ".join(res_list)

        self.schema = (
            "In the following, each IRI is followed by the local name and "
            "optionally its description in parentheses. \n"
            "The graph supports the following node types:\n"
            f"{elem_str['classes']}\n"
            "The graph supports the following relationships:\n"
            f"{elem_str['rels']}\n"
            "The graph supports the following OWL object properties:\n"
            f"{elem_str['oprops']}\n"
            "The graph supports the following OWL data properties:\n"
            f"{elem_str['dtprops']}"
        )

    def _get_local_name(self, iri: str) -> Sequence[str]:
        """
        Split IRI into prefix and local
        """
        if "#" in iri:
            tokens = iri.split("#")
            return [f"{tokens[0]}#", tokens[-1]]
        elif "/" in iri:
            tokens = iri.split("/")
            return [f"{'/'.join(tokens[0:len(tokens)-1])}/", tokens[-1]]
        else:
            raise ValueError(f"Unexpected IRI '{iri}', contains neither '#' nor '/'.")

    def _refresh_schema(self) -> None:
        """
        Query Neptune to introspect schema.
        """
        self.schema_elements["distinct_prefixes"] = {}

        # get summary and build list of classes and rels
        summary = self.get_summary()
        reslist = []
        for c in summary["payload"]["graphSummary"]["classes"]:
            uri = c
            tokens = self._get_local_name(uri)
            elem_record = {"uri": uri, "local": tokens[1]}
            reslist.append(elem_record)
            if tokens[0] not in self.schema_elements["distinct_prefixes"]:
                self.schema_elements["distinct_prefixes"][tokens[0]] = "y"
        self.schema_elements["classes"] = reslist

        reslist = []
        for r in summary["payload"]["graphSummary"]["predicates"]:
            for p in r:
                uri = p
                tokens = self._get_local_name(uri)
                elem_record = {"uri": uri, "local": tokens[1]}
                reslist.append(elem_record)
                if tokens[0] not in self.schema_elements["distinct_prefixes"]:
                    self.schema_elements["distinct_prefixes"][tokens[0]] = "y"
        self.schema_elements["rels"] = reslist

        # get dtprops and oprops too
        for elem in ELEM_TYPES:
            q = ELEM_TYPES.get(elem)
            if not q:
                continue
            items = self.query(q)
            reslist = []
            for r in items["results"]["bindings"]:
                uri = r["elem"]["value"]
                tokens = self._get_local_name(uri)
                elem_record = {"uri": uri, "local": tokens[1]}
                reslist.append(elem_record)
                if tokens[0] not in self.schema_elements["distinct_prefixes"]:
                    self.schema_elements["distinct_prefixes"][tokens[0]] = "y"
            self.schema_elements[elem] = reslist

        self.load_schema(self.schema_elements)
0
lc_public_repos/langchain/libs/community/langchain_community
lc_public_repos/langchain/libs/community/langchain_community/graphs/ontotext_graphdb_graph.py
from __future__ import annotations

import os
from typing import (
    TYPE_CHECKING,
    List,
    Optional,
    Union,
)

if TYPE_CHECKING:
    import rdflib


class OntotextGraphDBGraph:
    """Ontotext GraphDB https://graphdb.ontotext.com/ wrapper for graph operations.

    *Security note*: Make sure that the database connection uses credentials
        that are narrowly-scoped to only include necessary permissions.
        Failure to do so may result in data corruption or loss, since the calling
        code may attempt commands that would result in deletion, mutation
        of data if appropriately prompted or reading sensitive data if such
        data is present in the database.
        The best way to guard against such negative outcomes is to (as appropriate)
        limit the permissions granted to the credentials used with this tool.

        See https://python.langchain.com/docs/security for more information.
    """

    def __init__(
        self,
        query_endpoint: str,
        query_ontology: Optional[str] = None,
        local_file: Optional[str] = None,
        local_file_format: Optional[str] = None,
    ) -> None:
        """
        Set up the GraphDB wrapper

        :param query_endpoint: SPARQL endpoint for queries, read access

        If GraphDB is secured,
        set the environment variables 'GRAPHDB_USERNAME' and 'GRAPHDB_PASSWORD'.

        :param query_ontology: a `CONSTRUCT` query that is executed
        on the SPARQL endpoint and returns the KG schema statements
        Example:
        'CONSTRUCT {?s ?p ?o} FROM <https://example.com/ontology/> WHERE {?s ?p ?o}'
        Currently, DESCRIBE queries like
        'PREFIX onto: <https://example.com/ontology/>
        PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
        DESCRIBE ?term WHERE {
            ?term rdfs:isDefinedBy onto:
        }'
        are not supported, because DESCRIBE returns
        the Symmetric Concise Bounded Description (SCBD),
        i.e. also the incoming class links.
        In case of large graphs with millions of instances, this is not efficient.
        Check https://github.com/eclipse-rdf4j/rdf4j/issues/4857

        :param local_file: a local RDF ontology file.
        Supported RDF formats:
        Turtle, RDF/XML, JSON-LD, N-Triples, Notation-3, Trig, Trix, N-Quads.
        If the rdf format can't be determined from the file extension,
        pass explicitly the rdf format in `local_file_format` param.

        :param local_file_format: Used if the rdf format can't be determined
        from the local file extension.
        One of "json-ld", "xml", "n3", "turtle", "nt", "trig", "nquads", "trix"

        Either `query_ontology` or `local_file` should be passed.
        """

        if query_ontology and local_file:
            raise ValueError("Both file and query provided. Only one is allowed.")
        if not query_ontology and not local_file:
            raise ValueError("Neither file nor query provided. One is required.")
        try:
            import rdflib
            from rdflib.plugins.stores import sparqlstore
        except ImportError:
            raise ImportError(
                "Could not import rdflib python package. "
                "Please install it with `pip install rdflib`."
            )

        auth = self._get_auth()
        store = sparqlstore.SPARQLStore(auth=auth)
        store.open(query_endpoint)

        self.graph = rdflib.Graph(store, identifier=None, bind_namespaces="none")
        self._check_connectivity()

        if local_file:
            ontology_schema_graph = self._load_ontology_schema_from_file(
                local_file,
                local_file_format,
            )
        else:
            self._validate_user_query(query_ontology)  # type: ignore[arg-type]
            ontology_schema_graph = self._load_ontology_schema_with_query(
                query_ontology  # type: ignore[arg-type]
            )
        self.schema = ontology_schema_graph.serialize(format="turtle")

    @staticmethod
    def _get_auth() -> Union[tuple, None]:
        """
        Returns the basic authentication configuration
        """
        username = os.environ.get("GRAPHDB_USERNAME", None)
        password = os.environ.get("GRAPHDB_PASSWORD", None)

        if username:
            if not password:
                raise ValueError(
                    "Environment variable 'GRAPHDB_USERNAME' is set, "
                    "but 'GRAPHDB_PASSWORD' is not set."
                )
            else:
                return username, password
        return None

    def _check_connectivity(self) -> None:
        """
        Executes a simple `ASK` query to check connectivity
        """
        try:
            self.graph.query("ASK { ?s ?p ?o }")
        except ValueError:
            raise ValueError(
                "Could not query the provided endpoint. "
                "Please, check, if the value of the provided "
                "query_endpoint points to the right repository. "
                "If GraphDB is secured, please, "
                "make sure that the environment variables "
                "'GRAPHDB_USERNAME' and 'GRAPHDB_PASSWORD' are set."
            )

    @staticmethod
    def _load_ontology_schema_from_file(
        local_file: str, local_file_format: Optional[str] = None
    ):  # type: ignore[no-untyped-def]
        """
        Parse the ontology schema statements from the provided file
        """
        import rdflib

        if not os.path.exists(local_file):
            raise FileNotFoundError(f"File {local_file} does not exist.")
        if not os.access(local_file, os.R_OK):
            raise PermissionError(f"Read permission for {local_file} is restricted")
        graph = rdflib.ConjunctiveGraph()
        try:
            graph.parse(local_file, format=local_file_format)
        except Exception as e:
            raise ValueError(f"Invalid file format for {local_file}: {e}")
        return graph

    @staticmethod
    def _validate_user_query(query_ontology: str) -> None:
        """
        Validate the query is a valid SPARQL CONSTRUCT query
        """
        from pyparsing import ParseException
        from rdflib.plugins.sparql import prepareQuery

        if not isinstance(query_ontology, str):
            raise TypeError("Ontology query must be provided as string.")
        try:
            parsed_query = prepareQuery(query_ontology)
        except ParseException as e:
            raise ValueError(f"Ontology query is not a valid SPARQL query: {e}")
        if parsed_query.algebra.name != "ConstructQuery":
            raise ValueError(
                "Invalid query type. Only CONSTRUCT queries are supported."
            )

    def _load_ontology_schema_with_query(self, query: str):  # type: ignore[no-untyped-def]
        """
        Execute the query for collecting the ontology schema statements
        """
        from rdflib.exceptions import ParserError

        try:
            results = self.graph.query(query)
        except ParserError as e:
            raise ValueError(f"Generated SPARQL statement is invalid\n{e}")

        return results.graph

    @property
    def get_schema(self) -> str:
        """
        Returns the schema of the graph database in turtle format
        """
        return self.schema

    def query(
        self,
        query: str,
    ) -> List[rdflib.query.ResultRow]:
        """
        Query the graph.
        """
        from rdflib.query import ResultRow

        res = self.graph.query(query)
        return [r for r in res if isinstance(r, ResultRow)]
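

# --- Illustrative usage sketch (not part of the upstream module) ---
# A minimal example, assuming a GraphDB repository running locally; the
# repository URL and the ontology file name are placeholders.
if __name__ == "__main__":
    graph = OntotextGraphDBGraph(
        query_endpoint="http://localhost:7200/repositories/langchain",
        local_file="ontology.ttl",
    )
    print(graph.get_schema)
    for row in graph.query("SELECT * WHERE { ?s ?p ?o } LIMIT 5"):
        print(row)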
0
lc_public_repos/langchain/libs/community/langchain_community
lc_public_repos/langchain/libs/community/langchain_community/graphs/hugegraph.py
from typing import Any, Dict, List class HugeGraph: """HugeGraph wrapper for graph operations. *Security note*: Make sure that the database connection uses credentials that are narrowly-scoped to only include necessary permissions. Failure to do so may result in data corruption or loss, since the calling code may attempt commands that would result in deletion, mutation of data if appropriately prompted or reading sensitive data if such data is present in the database. The best way to guard against such negative outcomes is to (as appropriate) limit the permissions granted to the credentials used with this tool. See https://python.langchain.com/docs/security for more information. """ def __init__( self, username: str = "default", password: str = "default", address: str = "127.0.0.1", port: int = 8081, graph: str = "hugegraph", ) -> None: """Create a new HugeGraph wrapper instance.""" try: from hugegraph.connection import PyHugeGraph except ImportError: raise ImportError( "Please install HugeGraph Python client first: " "`pip3 install hugegraph-python`" ) self.username = username self.password = password self.address = address self.port = port self.graph = graph self.client = PyHugeGraph( address, port, user=username, pwd=password, graph=graph ) self.schema = "" # Set schema try: self.refresh_schema() except Exception as e: raise ValueError(f"Could not refresh schema. Error: {e}") @property def get_schema(self) -> str: """Returns the schema of the HugeGraph database""" return self.schema def refresh_schema(self) -> None: """ Refreshes the HugeGraph schema information. """ schema = self.client.schema() vertex_schema = schema.getVertexLabels() edge_schema = schema.getEdgeLabels() relationships = schema.getRelations() self.schema = ( f"Node properties: {vertex_schema}\n" f"Edge properties: {edge_schema}\n" f"Relationships: {relationships}\n" ) def query(self, query: str) -> List[Dict[str, Any]]: g = self.client.gremlin() res = g.exec(query) return res["data"]
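

# --- Illustrative usage sketch (not part of the upstream module) ---
# A minimal example, assuming a HugeGraph server listening on the default
# local address with a graph named "hugegraph"; all values are placeholders.
if __name__ == "__main__":
    graph = HugeGraph(address="127.0.0.1", port=8081, graph="hugegraph")
    print(graph.get_schema)
    print(graph.query("g.V().limit(5)"))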
0
lc_public_repos/langchain/libs/community/langchain_community
lc_public_repos/langchain/libs/community/langchain_community/graphs/nebula_graph.py
import logging
from string import Template
from typing import Any, Dict, Optional

logger = logging.getLogger(__name__)

rel_query = Template(
    """
MATCH ()-[e:`$edge_type`]->()
  WITH e limit 1
MATCH (m)-[:`$edge_type`]->(n) WHERE id(m) == src(e) AND id(n) == dst(e)
RETURN "(:" + tags(m)[0] + ")-[:$edge_type]->(:" + tags(n)[0] + ")" AS rels
"""
)

RETRY_TIMES = 3


class NebulaGraph:
    """NebulaGraph wrapper for graph operations.

    NebulaGraph inherits methods from Neo4jGraph to bring ease to the user space.

    *Security note*: Make sure that the database connection uses credentials
        that are narrowly-scoped to only include necessary permissions.
        Failure to do so may result in data corruption or loss, since the calling
        code may attempt commands that would result in deletion, mutation
        of data if appropriately prompted or reading sensitive data if such
        data is present in the database.
        The best way to guard against such negative outcomes is to (as appropriate)
        limit the permissions granted to the credentials used with this tool.

        See https://python.langchain.com/docs/security for more information.
    """

    def __init__(
        self,
        space: str,
        username: str = "root",
        password: str = "nebula",
        address: str = "127.0.0.1",
        port: int = 9669,
        session_pool_size: int = 30,
    ) -> None:
        """Create a new NebulaGraph wrapper instance."""
        try:
            import nebula3  # noqa: F401
            import pandas  # noqa: F401
        except ImportError:
            raise ImportError(
                "Please install NebulaGraph Python client and pandas first: "
                "`pip install nebula3-python pandas`"
            )

        self.username = username
        self.password = password
        self.address = address
        self.port = port
        self.space = space
        self.session_pool_size = session_pool_size

        self.session_pool = self._get_session_pool()
        self.schema = ""
        # Set schema
        try:
            self.refresh_schema()
        except Exception as e:
            raise ValueError(f"Could not refresh schema. Error: {e}")

    def _get_session_pool(self) -> Any:
        assert all(
            [self.username, self.password, self.address, self.port, self.space]
        ), (
            "Please provide all of the following parameters: "
            "username, password, address, port, space"
        )

        from nebula3.Config import SessionPoolConfig
        from nebula3.Exception import AuthFailedException, InValidHostname
        from nebula3.gclient.net.SessionPool import SessionPool

        config = SessionPoolConfig()
        config.max_size = self.session_pool_size

        try:
            session_pool = SessionPool(
                self.username,
                self.password,
                self.space,
                [(self.address, self.port)],
            )
        except InValidHostname:
            raise ValueError(
                "Could not connect to NebulaGraph database. "
                "Please ensure that the address and port are correct"
            )

        try:
            session_pool.init(config)
        except AuthFailedException:
            raise ValueError(
                "Could not connect to NebulaGraph database. "
                "Please ensure that the username and password are correct"
            )
        except RuntimeError as e:
            raise ValueError(f"Error initializing session pool. Error: {e}")

        return session_pool

    def __del__(self) -> None:
        try:
            self.session_pool.close()
        except Exception as e:
            logger.warning(f"Could not close session pool. Error: {e}")

    @property
    def get_schema(self) -> str:
        """Returns the schema of the NebulaGraph database"""
        return self.schema

    def execute(self, query: str, params: Optional[dict] = None, retry: int = 0) -> Any:
        """Query NebulaGraph database."""
        from nebula3.Exception import IOErrorException, NoValidSessionException
        from nebula3.fbthrift.transport.TTransport import TTransportException

        params = params or {}
        try:
            result = self.session_pool.execute_parameter(query, params)
            if not result.is_succeeded():
                logger.warning(
                    "Error executing query to NebulaGraph. "
                    f"Error: {result.error_msg()}\n"
                    f"Query: {query} \n"
                )
            return result
        except NoValidSessionException:
            logger.warning(
                "No valid session found in session pool. "
                "Please consider increasing the session pool size. "
                f"Current size: {self.session_pool_size}"
            )
            raise ValueError(
                "No valid session found in session pool. "
                "Please consider increasing the session pool size. "
                f"Current size: {self.session_pool_size}"
            )

        except RuntimeError as e:
            if retry < RETRY_TIMES:
                retry += 1
                logger.warning(
                    "Error executing query to NebulaGraph. "
                    f"Retrying ({retry}/{RETRY_TIMES})...\n"
                    f"query: {query} \n"
                    f"Error: {e}"
                )
                return self.execute(query, params, retry)
            else:
                raise ValueError(f"Error executing query to NebulaGraph. Error: {e}")

        except (TTransportException, IOErrorException):
            # connection issue, try to recreate session pool
            if retry < RETRY_TIMES:
                retry += 1
                logger.warning(
                    "Connection issue with NebulaGraph. "
                    f"Retrying ({retry}/{RETRY_TIMES})...\n to recreate session pool"
                )
                self.session_pool = self._get_session_pool()
                return self.execute(query, params, retry)

    def refresh_schema(self) -> None:
        """
        Refreshes the NebulaGraph schema information.
        """
        tags_schema, edge_types_schema, relationships = [], [], []
        for tag in self.execute("SHOW TAGS").column_values("Name"):
            tag_name = tag.cast()
            tag_schema = {"tag": tag_name, "properties": []}
            r = self.execute(f"DESCRIBE TAG `{tag_name}`")
            props, types = r.column_values("Field"), r.column_values("Type")
            for i in range(r.row_size()):
                tag_schema["properties"].append((props[i].cast(), types[i].cast()))
            tags_schema.append(tag_schema)
        for edge_type in self.execute("SHOW EDGES").column_values("Name"):
            edge_type_name = edge_type.cast()
            edge_schema = {"edge": edge_type_name, "properties": []}
            r = self.execute(f"DESCRIBE EDGE `{edge_type_name}`")
            props, types = r.column_values("Field"), r.column_values("Type")
            for i in range(r.row_size()):
                edge_schema["properties"].append((props[i].cast(), types[i].cast()))
            edge_types_schema.append(edge_schema)

            # build relationships types
            r = self.execute(
                rel_query.substitute(edge_type=edge_type_name)
            ).column_values("rels")
            if len(r) > 0:
                relationships.append(r[0].cast())

        self.schema = (
            f"Node properties: {tags_schema}\n"
            f"Edge properties: {edge_types_schema}\n"
            f"Relationships: {relationships}\n"
        )

    def query(self, query: str, retry: int = 0) -> Dict[str, Any]:
        result = self.execute(query, retry=retry)
        columns = result.keys()
        d: Dict[str, list] = {}
        for col_num in range(result.col_size()):
            col_name = columns[col_num]
            col_list = result.column_values(col_name)
            d[col_name] = [x.cast() for x in col_list]
        return d
0
lc_public_repos/langchain/libs/community/langchain_community
lc_public_repos/langchain/libs/community/langchain_community/graphs/kuzu_graph.py
from typing import Any, Dict, List class KuzuGraph: """Kùzu wrapper for graph operations. *Security note*: Make sure that the database connection uses credentials that are narrowly-scoped to only include necessary permissions. Failure to do so may result in data corruption or loss, since the calling code may attempt commands that would result in deletion, mutation of data if appropriately prompted or reading sensitive data if such data is present in the database. The best way to guard against such negative outcomes is to (as appropriate) limit the permissions granted to the credentials used with this tool. See https://python.langchain.com/docs/security for more information. """ def __init__(self, db: Any, database: str = "kuzu") -> None: try: import kuzu except ImportError: raise ImportError( "Could not import Kùzu python package." "Please install Kùzu with `pip install kuzu`." ) self.db = db self.conn = kuzu.Connection(self.db) self.database = database self.refresh_schema() @property def get_schema(self) -> str: """Returns the schema of the Kùzu database""" return self.schema def query(self, query: str, params: dict = {}) -> List[Dict[str, Any]]: """Query Kùzu database""" result = self.conn.execute(query, params) column_names = result.get_column_names() return_list = [] while result.has_next(): row = result.get_next() return_list.append(dict(zip(column_names, row))) return return_list def refresh_schema(self) -> None: """Refreshes the Kùzu graph schema information""" node_properties = [] node_table_names = self.conn._get_node_table_names() for table_name in node_table_names: current_table_schema = {"properties": [], "label": table_name} properties = self.conn._get_node_property_names(table_name) for property_name in properties: property_type = properties[property_name]["type"] list_type_flag = "" if properties[property_name]["dimension"] > 0: if "shape" in properties[property_name]: for s in properties[property_name]["shape"]: list_type_flag += "[%s]" % s else: for i in range(properties[property_name]["dimension"]): list_type_flag += "[]" property_type += list_type_flag current_table_schema["properties"].append( (property_name, property_type) ) node_properties.append(current_table_schema) relationships = [] rel_tables = self.conn._get_rel_table_names() for table in rel_tables: relationships.append( "(:%s)-[:%s]->(:%s)" % (table["src"], table["name"], table["dst"]) ) rel_properties = [] for table in rel_tables: table_name = table["name"] current_table_schema = {"properties": [], "label": table_name} query_result = self.conn.execute( f"CALL table_info('{table_name}') RETURN *;" ) while query_result.has_next(): row = query_result.get_next() prop_name = row[1] prop_type = row[2] current_table_schema["properties"].append((prop_name, prop_type)) rel_properties.append(current_table_schema) self.schema = ( f"Node properties: {node_properties}\n" f"Relationships properties: {rel_properties}\n" f"Relationships: {relationships}\n" )
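

# --- Illustrative usage sketch (not part of the upstream module) ---
# A minimal example, assuming the `kuzu` package is installed; the database
# directory below is a placeholder and is created on first use.
if __name__ == "__main__":
    import kuzu

    db = kuzu.Database("./kuzu_example_db")
    graph = KuzuGraph(db)
    print(graph.get_schema)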
0
lc_public_repos/langchain/libs/community/langchain_community
lc_public_repos/langchain/libs/community/langchain_community/graphs/gremlin_graph.py
import hashlib
import sys
from typing import Any, Dict, List, Optional, Union

from langchain_core.utils import get_from_env

from langchain_community.graphs.graph_document import GraphDocument, Node, Relationship
from langchain_community.graphs.graph_store import GraphStore


class GremlinGraph(GraphStore):
    """Gremlin wrapper for graph operations.

    Parameters:
    url (Optional[str]): The URL of the Gremlin database server or env GREMLIN_URI
    username (Optional[str]): The collection-identifier like '/dbs/database/colls/graph'
                              or env GREMLIN_USERNAME if none provided
    password (Optional[str]): The connection-key for database authentication
                              or env GREMLIN_PASSWORD if none provided
    traversal_source (str): The traversal source to use for queries. Defaults to 'g'.
    message_serializer (Optional[Any]): The message serializer to use for requests.
                                        Defaults to serializer.GraphSONSerializersV2d0()

    *Security note*: Make sure that the database connection uses credentials
        that are narrowly-scoped to only include necessary permissions.
        Failure to do so may result in data corruption or loss, since the calling
        code may attempt commands that would result in deletion, mutation
        of data if appropriately prompted or reading sensitive data if such
        data is present in the database.
        The best way to guard against such negative outcomes is to (as appropriate)
        limit the permissions granted to the credentials used with this tool.

        See https://python.langchain.com/docs/security for more information.

    *Implementation details*:
        The Gremlin queries are designed to work with Azure CosmosDB limitations
    """

    @property
    def get_structured_schema(self) -> Dict[str, Any]:
        return self.structured_schema

    def __init__(
        self,
        url: Optional[str] = None,
        username: Optional[str] = None,
        password: Optional[str] = None,
        traversal_source: str = "g",
        message_serializer: Optional[Any] = None,
    ) -> None:
        """Create a new Gremlin graph wrapper instance."""
        try:
            import asyncio

            from gremlin_python.driver import client, serializer

            if sys.platform == "win32":
                asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
        except ImportError:
            raise ImportError(
                "Please install gremlin-python first: "
                "`pip3 install gremlinpython`"
            )

        self.client = client.Client(
            url=get_from_env("url", "GREMLIN_URI", url),
            traversal_source=traversal_source,
            username=get_from_env("username", "GREMLIN_USERNAME", username),
            password=get_from_env("password", "GREMLIN_PASSWORD", password),
            message_serializer=message_serializer
            if message_serializer
            else serializer.GraphSONSerializersV2d0(),
        )
        self.schema: str = ""

    @property
    def get_schema(self) -> str:
        """Returns the schema of the Gremlin database"""
        if len(self.schema) == 0:
            self.refresh_schema()
        return self.schema

    def refresh_schema(self) -> None:
        """
        Refreshes the Gremlin graph schema information.
""" vertex_schema = self.client.submit("g.V().label().dedup()").all().result() edge_schema = self.client.submit("g.E().label().dedup()").all().result() vertex_properties = ( self.client.submit( "g.V().group().by(label).by(properties().label().dedup().fold())" ) .all() .result()[0] ) self.structured_schema = { "vertex_labels": vertex_schema, "edge_labels": edge_schema, "vertice_props": vertex_properties, } self.schema = "\n".join( [ "Vertex labels are the following:", ",".join(vertex_schema), "Edge labes are the following:", ",".join(edge_schema), f"Vertices have following properties:\n{vertex_properties}", ] ) def query(self, query: str, params: dict = {}) -> List[Dict[str, Any]]: q = self.client.submit(query) return q.all().result() def add_graph_documents( self, graph_documents: List[GraphDocument], include_source: bool = False ) -> None: """ Take GraphDocument as input as uses it to construct a graph. """ node_cache: Dict[Union[str, int], Node] = {} for document in graph_documents: if include_source: # Create document vertex doc_props = { "page_content": document.source.page_content, "metadata": document.source.metadata, } doc_id = hashlib.md5(document.source.page_content.encode()).hexdigest() doc_node = self.add_node( Node(id=doc_id, type="Document", properties=doc_props), node_cache ) # Import nodes to vertices for n in document.nodes: node = self.add_node(n) if include_source: # Add Edge to document for each node self.add_edge( Relationship( type="contains information about", source=doc_node, target=node, properties={}, ) ) self.add_edge( Relationship( type="is extracted from", source=node, target=doc_node, properties={}, ) ) # Edges for el in document.relationships: # Find or create the source vertex self.add_node(el.source, node_cache) # Find or create the target vertex self.add_node(el.target, node_cache) # Find or create the edge self.add_edge(el) def build_vertex_query(self, node: Node) -> str: base_query = ( f"g.V().has('id','{node.id}').fold()" + f".coalesce(unfold(),addV('{node.type}')" + f".property('id','{node.id}')" + f".property('type','{node.type}')" ) for key, value in node.properties.items(): base_query += f".property('{key}', '{value}')" return base_query + ")" def build_edge_query(self, relationship: Relationship) -> str: source_query = f".has('id','{relationship.source.id}')" target_query = f".has('id','{relationship.target.id}')" base_query = f""""g.V(){source_query}.as('a') .V(){target_query}.as('b') .choose( __.inE('{relationship.type}').where(outV().as('a')), __.identity(), __.addE('{relationship.type}').from('a').to('b') ) """.replace("\n", "").replace("\t", "") for key, value in relationship.properties.items(): base_query += f".property('{key}', '{value}')" return base_query def add_node(self, node: Node, node_cache: dict = {}) -> Node: # if properties does not have label, add type as label if "label" not in node.properties: node.properties["label"] = node.type if node.id in node_cache: return node_cache[node.id] else: query = self.build_vertex_query(node) _ = self.client.submit(query).all().result()[0] node_cache[node.id] = node return node def add_edge(self, relationship: Relationship) -> Any: query = self.build_edge_query(relationship) return self.client.submit(query).all().result()
0
lc_public_repos/langchain/libs/community/langchain_community
lc_public_repos/langchain/libs/community/langchain_community/graphs/graph_document.py
from __future__ import annotations from typing import List, Union from langchain_core.documents import Document from langchain_core.load.serializable import Serializable from pydantic import Field class Node(Serializable): """Represents a node in a graph with associated properties. Attributes: id (Union[str, int]): A unique identifier for the node. type (str): The type or label of the node, default is "Node". properties (dict): Additional properties and metadata associated with the node. """ id: Union[str, int] type: str = "Node" properties: dict = Field(default_factory=dict) class Relationship(Serializable): """Represents a directed relationship between two nodes in a graph. Attributes: source (Node): The source node of the relationship. target (Node): The target node of the relationship. type (str): The type of the relationship. properties (dict): Additional properties associated with the relationship. """ source: Node target: Node type: str properties: dict = Field(default_factory=dict) class GraphDocument(Serializable): """Represents a graph document consisting of nodes and relationships. Attributes: nodes (List[Node]): A list of nodes in the graph. relationships (List[Relationship]): A list of relationships in the graph. source (Document): The document from which the graph information is derived. """ nodes: List[Node] relationships: List[Relationship] source: Document
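

# --- Illustrative usage sketch (not part of the upstream module) ---
# Building a small GraphDocument by hand; runnable as-is, since it only
# uses the classes defined above.
if __name__ == "__main__":
    alice = Node(id="alice", type="Person", properties={"name": "Alice"})
    project = Node(id="langchain", type="Project")
    doc = GraphDocument(
        nodes=[alice, project],
        relationships=[
            Relationship(source=alice, target=project, type="CONTRIBUTES_TO")
        ],
        source=Document(page_content="Alice contributes to LangChain."),
    )
    print(doc.nodes)
    print(doc.relationships)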
0
lc_public_repos/langchain/libs/community/langchain_community
lc_public_repos/langchain/libs/community/langchain_community/graphs/index_creator.py
from typing import Optional, Type from pydantic import BaseModel from langchain_core.language_models import BaseLanguageModel from langchain_core.prompts import BasePromptTemplate from langchain_core.prompts.prompt import PromptTemplate from langchain_community.graphs import NetworkxEntityGraph from langchain_community.graphs.networkx_graph import KG_TRIPLE_DELIMITER from langchain_community.graphs.networkx_graph import parse_triples # flake8: noqa _DEFAULT_KNOWLEDGE_TRIPLE_EXTRACTION_TEMPLATE = ( "You are a networked intelligence helping a human track knowledge triples" " about all relevant people, things, concepts, etc. and integrating" " them with your knowledge stored within your weights" " as well as that stored in a knowledge graph." " Extract all of the knowledge triples from the text." " A knowledge triple is a clause that contains a subject, a predicate," " and an object. The subject is the entity being described," " the predicate is the property of the subject that is being" " described, and the object is the value of the property.\n\n" "EXAMPLE\n" "It's a state in the US. It's also the number 1 producer of gold in the US.\n\n" f"Output: (Nevada, is a, state){KG_TRIPLE_DELIMITER}(Nevada, is in, US)" f"{KG_TRIPLE_DELIMITER}(Nevada, is the number 1 producer of, gold)\n" "END OF EXAMPLE\n\n" "EXAMPLE\n" "I'm going to the store.\n\n" "Output: NONE\n" "END OF EXAMPLE\n\n" "EXAMPLE\n" "Oh huh. I know Descartes likes to drive antique scooters and play the mandolin.\n" f"Output: (Descartes, likes to drive, antique scooters){KG_TRIPLE_DELIMITER}(Descartes, plays, mandolin)\n" "END OF EXAMPLE\n\n" "EXAMPLE\n" "{text}" "Output:" ) KNOWLEDGE_TRIPLE_EXTRACTION_PROMPT = PromptTemplate( input_variables=["text"], template=_DEFAULT_KNOWLEDGE_TRIPLE_EXTRACTION_TEMPLATE, ) class GraphIndexCreator(BaseModel): """Functionality to create graph index.""" llm: Optional[BaseLanguageModel] = None graph_type: Type[NetworkxEntityGraph] = NetworkxEntityGraph def from_text( self, text: str, prompt: BasePromptTemplate = KNOWLEDGE_TRIPLE_EXTRACTION_PROMPT ) -> NetworkxEntityGraph: """Create graph index from text.""" if self.llm is None: raise ValueError("llm should not be None") graph = self.graph_type() # Temporary local scoped import while community does not depend on # langchain explicitly try: from langchain.chains import LLMChain except ImportError: raise ImportError( "Please install langchain to use this functionality. " "You can install it with `pip install langchain`." ) chain = LLMChain(llm=self.llm, prompt=prompt) output = chain.predict(text=text) knowledge = parse_triples(output) for triple in knowledge: graph.add_triple(triple) return graph async def afrom_text( self, text: str, prompt: BasePromptTemplate = KNOWLEDGE_TRIPLE_EXTRACTION_PROMPT ) -> NetworkxEntityGraph: """Create graph index from text asynchronously.""" if self.llm is None: raise ValueError("llm should not be None") graph = self.graph_type() # Temporary local scoped import while community does not depend on # langchain explicitly try: from langchain.chains import LLMChain except ImportError: raise ImportError( "Please install langchain to use this functionality. " "You can install it with `pip install langchain`." ) chain = LLMChain(llm=self.llm, prompt=prompt) output = await chain.apredict(text=text) knowledge = parse_triples(output) for triple in knowledge: graph.add_triple(triple) return graph
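

# --- Illustrative usage sketch (not part of the upstream module) ---
# Exercising the triple-parsing path without an LLM; the sample string
# mimics the delimiter-separated output the extraction prompt asks for.
if __name__ == "__main__":
    sample = f"(Nevada, is a, state){KG_TRIPLE_DELIMITER}(Nevada, is in, US)"
    graph = NetworkxEntityGraph()
    for triple in parse_triples(sample):
        graph.add_triple(triple)
    print(graph.get_triples())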
0
lc_public_repos/langchain/libs/community/langchain_community
lc_public_repos/langchain/libs/community/langchain_community/graphs/networkx_graph.py
"""Networkx wrapper for graph operations.""" from __future__ import annotations from typing import Any, List, NamedTuple, Optional, Tuple KG_TRIPLE_DELIMITER = "<|>" class KnowledgeTriple(NamedTuple): """Knowledge triple in the graph.""" subject: str predicate: str object_: str @classmethod def from_string(cls, triple_string: str) -> "KnowledgeTriple": """Create a KnowledgeTriple from a string.""" subject, predicate, object_ = triple_string.strip().split(", ") subject = subject[1:] object_ = object_[:-1] return cls(subject, predicate, object_) def parse_triples(knowledge_str: str) -> List[KnowledgeTriple]: """Parse knowledge triples from the knowledge string.""" knowledge_str = knowledge_str.strip() if not knowledge_str or knowledge_str == "NONE": return [] triple_strs = knowledge_str.split(KG_TRIPLE_DELIMITER) results = [] for triple_str in triple_strs: try: kg_triple = KnowledgeTriple.from_string(triple_str) except ValueError: continue results.append(kg_triple) return results def get_entities(entity_str: str) -> List[str]: """Extract entities from entity string.""" if entity_str.strip() == "NONE": return [] else: return [w.strip() for w in entity_str.split(",")] class NetworkxEntityGraph: """Networkx wrapper for entity graph operations. *Security note*: Make sure that the database connection uses credentials that are narrowly-scoped to only include necessary permissions. Failure to do so may result in data corruption or loss, since the calling code may attempt commands that would result in deletion, mutation of data if appropriately prompted or reading sensitive data if such data is present in the database. The best way to guard against such negative outcomes is to (as appropriate) limit the permissions granted to the credentials used with this tool. See https://python.langchain.com/docs/security for more information. """ def __init__(self, graph: Optional[Any] = None) -> None: """Create a new graph.""" try: import networkx as nx except ImportError: raise ImportError( "Could not import networkx python package. " "Please install it with `pip install networkx`." ) if graph is not None: if not isinstance(graph, nx.DiGraph): raise ValueError("Passed in graph is not of correct shape") self._graph = graph else: self._graph = nx.DiGraph() @classmethod def from_gml(cls, gml_path: str) -> NetworkxEntityGraph: try: import networkx as nx except ImportError: raise ImportError( "Could not import networkx python package. " "Please install it with `pip install networkx`." 
            )
        graph = nx.read_gml(gml_path)
        return cls(graph)

    def add_triple(self, knowledge_triple: KnowledgeTriple) -> None:
        """Add a triple to the graph."""
        # Creates nodes if they don't exist
        # Overwrites existing edges
        if not self._graph.has_node(knowledge_triple.subject):
            self._graph.add_node(knowledge_triple.subject)
        if not self._graph.has_node(knowledge_triple.object_):
            self._graph.add_node(knowledge_triple.object_)
        self._graph.add_edge(
            knowledge_triple.subject,
            knowledge_triple.object_,
            relation=knowledge_triple.predicate,
        )

    def delete_triple(self, knowledge_triple: KnowledgeTriple) -> None:
        """Delete a triple from the graph."""
        if self._graph.has_edge(knowledge_triple.subject, knowledge_triple.object_):
            self._graph.remove_edge(knowledge_triple.subject, knowledge_triple.object_)

    def get_triples(self) -> List[Tuple[str, str, str]]:
        """Get all triples in the graph."""
        return [(u, v, d["relation"]) for u, v, d in self._graph.edges(data=True)]

    def get_entity_knowledge(self, entity: str, depth: int = 1) -> List[str]:
        """Get information about an entity."""
        import networkx as nx

        # TODO: Have more information-specific retrieval methods
        if not self._graph.has_node(entity):
            return []

        results = []
        for src, sink in nx.dfs_edges(self._graph, entity, depth_limit=depth):
            relation = self._graph[src][sink]["relation"]
            results.append(f"{src} {relation} {sink}")
        return results

    def write_to_gml(self, path: str) -> None:
        import networkx as nx

        nx.write_gml(self._graph, path)

    def clear(self) -> None:
        """Clear the graph."""
        self._graph.clear()

    def clear_edges(self) -> None:
        """Clear the graph edges."""
        self._graph.clear_edges()

    def add_node(self, node: str) -> None:
        """Add node in the graph."""
        self._graph.add_node(node)

    def remove_node(self, node: str) -> None:
        """Remove node from the graph."""
        if self._graph.has_node(node):
            self._graph.remove_node(node)

    def has_node(self, node: str) -> bool:
        """Return if graph has the given node."""
        return self._graph.has_node(node)

    def remove_edge(self, source_node: str, destination_node: str) -> None:
        """Remove edge from the graph."""
        self._graph.remove_edge(source_node, destination_node)

    def has_edge(self, source_node: str, destination_node: str) -> bool:
        """Return if graph has an edge between the given nodes."""
        if self._graph.has_node(source_node) and self._graph.has_node(
            destination_node
        ):
            return self._graph.has_edge(source_node, destination_node)
        else:
            return False

    def get_neighbors(self, node: str) -> List[str]:
        """Return the neighbor nodes of the given node."""
        return self._graph.neighbors(node)

    def get_number_of_nodes(self) -> int:
        """Get number of nodes in the graph."""
        return self._graph.number_of_nodes()

    def get_topological_sort(self) -> List[str]:
        """Get a list of entity names in the graph sorted by causal dependence."""
        import networkx as nx

        return list(nx.topological_sort(self._graph))

    def draw_graphviz(self, **kwargs: Any) -> None:
        """
        Provides better drawing of the graph via graphviz layouts.

        Usage in a jupyter notebook:

            >>> from IPython.display import SVG
            >>> self.draw_graphviz(prog="dot", path="web.svg")
            >>> SVG('web.svg')
        """
        from networkx.drawing.nx_agraph import to_agraph

        try:
            import pygraphviz  # noqa: F401

        except ImportError as e:
            if e.name == "_graphviz":
                """
                >>> e.msg  # pygraphviz throws this error
                ImportError: libcgraph.so.6: cannot open shared object file
                """
                raise ImportError(
                    "Could not import graphviz debian package. "
                    "Please install it with:"
                    "`sudo apt-get update`"
                    "`sudo apt-get install graphviz graphviz-dev`"
                )
            else:
                raise ImportError(
                    "Could not import pygraphviz python package. "
                    "Please install it with:"
                    "`pip install pygraphviz`."
                )

        graph = to_agraph(self._graph)  # --> pygraphviz.agraph.AGraph
        # pygraphviz.github.io/documentation/stable/tutorial.html#layout-and-drawing
        graph.layout(prog=kwargs.get("prog", "dot"))
        graph.draw(kwargs.get("path", "graph.svg"))
0
lc_public_repos/langchain/libs/community/langchain_community
lc_public_repos/langchain/libs/community/langchain_community/graphs/falkordb_graph.py
import warnings
from typing import Any, Dict, List, Optional

from langchain_core._api import deprecated

from langchain_community.graphs.graph_document import GraphDocument
from langchain_community.graphs.graph_store import GraphStore

node_properties_query = """
MATCH (n)
WITH keys(n) as keys, labels(n) AS labels
WITH CASE WHEN keys = [] THEN [NULL] ELSE keys END AS keys, labels
UNWIND labels AS label
UNWIND keys AS key
WITH label, collect(DISTINCT key) AS keys
RETURN {label:label, keys:keys} AS output
"""

rel_properties_query = """
MATCH ()-[r]->()
WITH keys(r) as keys, type(r) AS types
WITH CASE WHEN keys = [] THEN [NULL] ELSE keys END AS keys, types
UNWIND types AS type
UNWIND keys AS key
WITH type, collect(DISTINCT key) AS keys
RETURN {types:type, keys:keys} AS output
"""

rel_query = """
MATCH (n)-[r]->(m)
UNWIND labels(n) as src_label
UNWIND labels(m) as dst_label
UNWIND type(r) as rel_type
RETURN DISTINCT {start: src_label, type: rel_type, end: dst_label} AS output
"""


class FalkorDBGraph(GraphStore):
    """FalkorDB wrapper for graph operations.

    *Security note*: Make sure that the database connection uses credentials
        that are narrowly-scoped to only include necessary permissions.
        Failure to do so may result in data corruption or loss, since the calling
        code may attempt commands that would result in deletion, mutation
        of data if appropriately prompted or reading sensitive data if such
        data is present in the database.
        The best way to guard against such negative outcomes is to (as appropriate)
        limit the permissions granted to the credentials used with this tool.

        See https://python.langchain.com/docs/security for more information.
    """

    def __init__(
        self,
        database: str,
        host: str = "localhost",
        port: int = 6379,
        username: Optional[str] = None,
        password: Optional[str] = None,
        ssl: bool = False,
    ) -> None:
        """Create a new FalkorDB graph wrapper instance."""
        try:
            self.__init_falkordb_connection(
                database, host, port, username, password, ssl
            )
        except ImportError:
            try:
                # Falls back to using the redis package just for backwards compatibility
                self.__init_redis_connection(
                    database, host, port, username, password, ssl
                )
            except ImportError:
                raise ImportError(
                    "Could not import falkordb python package. "
                    "Please install it with `pip install falkordb`."
                )

        self.schema: str = ""
        self.structured_schema: Dict[str, Any] = {}

        try:
            self.refresh_schema()
        except Exception as e:
            raise ValueError(f"Could not refresh schema. Error: {e}")

    def __init_falkordb_connection(
        self,
        database: str,
        host: str = "localhost",
        port: int = 6379,
        username: Optional[str] = None,
        password: Optional[str] = None,
        ssl: bool = False,
    ) -> None:
        from falkordb import FalkorDB

        try:
            self._driver = FalkorDB(
                host=host, port=port, username=username, password=password, ssl=ssl
            )
        except Exception as e:
            raise ConnectionError(f"Failed to connect to FalkorDB: {e}")

        self._graph = self._driver.select_graph(database)

    @deprecated("0.0.31", alternative="__init_falkordb_connection")
    def __init_redis_connection(
        self,
        database: str,
        host: str = "localhost",
        port: int = 6379,
        username: Optional[str] = None,
        password: Optional[str] = None,
        ssl: bool = False,
    ) -> None:
        import redis
        from redis.commands.graph import Graph

        # show deprecation warning
        warnings.warn(
            "Using the redis package is deprecated. "
            "Please use the falkordb package instead, "
            "install it with `pip install falkordb`.",
            DeprecationWarning,
        )

        self._driver = redis.Redis(
            host=host, port=port, username=username, password=password, ssl=ssl
        )
        self._graph = Graph(self._driver, database)

    @property
    def get_schema(self) -> str:
        """Returns the schema of the FalkorDB database"""
        return self.schema

    @property
    def get_structured_schema(self) -> Dict[str, Any]:
        """Returns the structured schema of the Graph"""
        return self.structured_schema

    def refresh_schema(self) -> None:
        """Refreshes the schema of the FalkorDB database"""
        node_properties: List[Any] = self.query(node_properties_query)
        rel_properties: List[Any] = self.query(rel_properties_query)
        relationships: List[Any] = self.query(rel_query)

        self.structured_schema = {
            "node_props": {el[0]["label"]: el[0]["keys"] for el in node_properties},
            "rel_props": {el[0]["types"]: el[0]["keys"] for el in rel_properties},
            "relationships": [el[0] for el in relationships],
        }

        self.schema = (
            f"Node properties: {node_properties}\n"
            f"Relationships properties: {rel_properties}\n"
            f"Relationships: {relationships}\n"
        )

    def query(self, query: str, params: dict = {}) -> List[Dict[str, Any]]:
        """Query FalkorDB database."""
        try:
            data = self._graph.query(query, params)
            return data.result_set
        except Exception as e:
            raise ValueError("Generated Cypher Statement is not valid\n" f"{e}")

    def add_graph_documents(
        self, graph_documents: List[GraphDocument], include_source: bool = False
    ) -> None:
        """
        Take GraphDocument as input and use it to construct a graph.
        """
        for document in graph_documents:
            # Import nodes
            for node in document.nodes:
                self.query(
                    (
                        f"MERGE (n:{node.type} {{id:'{node.id}'}}) "
                        "SET n += $properties "
                        "RETURN distinct 'done' AS result"
                    ),
                    {"properties": node.properties},
                )

            # Import relationships
            for rel in document.relationships:
                self.query(
                    (
                        f"MATCH (a:{rel.source.type} {{id:'{rel.source.id}'}}), "
                        f"(b:{rel.target.type} {{id:'{rel.target.id}'}}) "
                        f"MERGE (a)-[r:{(rel.type.replace(' ', '_').upper())}]->(b) "
                        "SET r += $properties "
                        "RETURN distinct 'done' AS result"
                    ),
                    {"properties": rel.properties},
                )
0
lc_public_repos/langchain/libs/community/langchain_community
lc_public_repos/langchain/libs/community/langchain_community/graphs/age_graph.py
from __future__ import annotations

import json
import re
from hashlib import md5
from typing import TYPE_CHECKING, Any, Dict, List, NamedTuple, Pattern, Tuple, Union

from langchain_community.graphs.graph_document import GraphDocument
from langchain_community.graphs.graph_store import GraphStore

if TYPE_CHECKING:
    import psycopg2.extras


class AGEQueryException(Exception):
    """Exception for the AGE queries."""

    def __init__(self, exception: Union[str, Dict]) -> None:
        if isinstance(exception, dict):
            self.message = exception.get("message", "unknown")
            # accept both "details" and "detail", since the raise sites in
            # this module pass the singular key
            self.details = exception.get(
                "details", exception.get("detail", "unknown")
            )
        else:
            self.message = exception
            self.details = "unknown"

    def get_message(self) -> str:
        return self.message

    def get_details(self) -> Any:
        return self.details


class AGEGraph(GraphStore):
    """
    Apache AGE wrapper for graph operations.

    Args:
        graph_name (str): the name of the graph to connect to or create
        conf (Dict[str, Any]): the pgsql connection config passed directly
            to psycopg2.connect
        create (bool): if True and graph doesn't exist, attempt to create it

    *Security note*: Make sure that the database connection uses credentials
        that are narrowly-scoped to only include necessary permissions.
        Failure to do so may result in data corruption or loss, since the calling
        code may attempt commands that would result in deletion, mutation
        of data if appropriately prompted or reading sensitive data if such
        data is present in the database.
        The best way to guard against such negative outcomes is to (as appropriate)
        limit the permissions granted to the credentials used with this tool.

        See https://python.langchain.com/docs/security for more information.
    """

    # python type mapping for providing readable types to LLM
    types = {
        "str": "STRING",
        "float": "DOUBLE",
        "int": "INTEGER",
        "list": "LIST",
        "dict": "MAP",
        "bool": "BOOLEAN",
    }

    # precompiled regex for checking chars in graph labels
    label_regex: Pattern = re.compile("[^0-9a-zA-Z]+")

    def __init__(
        self, graph_name: str, conf: Dict[str, Any], create: bool = True
    ) -> None:
        """Create a new AGEGraph instance."""

        self.graph_name = graph_name

        # check that psycopg2 is installed
        try:
            import psycopg2
        except ImportError:
            raise ImportError(
                "Could not import psycopg2 python package. "
                "Please install it with `pip install psycopg2`."
            )

        self.connection = psycopg2.connect(**conf)

        with self._get_cursor() as curs:
            # check if graph with name graph_name exists
            graph_id_query = (
                """SELECT graphid FROM ag_catalog.ag_graph WHERE name = '{}'""".format(
                    graph_name
                )
            )

            curs.execute(graph_id_query)
            data = curs.fetchone()

            # if graph doesn't exist and create is True, create it
            if data is None:
                if create:
                    create_statement = """
                        SELECT ag_catalog.create_graph('{}');
                    """.format(graph_name)

                    try:
                        curs.execute(create_statement)
                        self.connection.commit()
                    except psycopg2.Error as e:
                        raise AGEQueryException(
                            {
                                "message": "Could not create the graph",
                                "detail": str(e),
                            }
                        )

                else:
                    raise Exception(
                        (
                            'Graph "{}" does not exist in the database '
                            + 'and "create" is set to False'
                        ).format(graph_name)
                    )

                curs.execute(graph_id_query)
                data = curs.fetchone()

            # store graph id and refresh the schema
            self.graphid = data.graphid

        self.refresh_schema()

    def _get_cursor(self) -> psycopg2.extras.NamedTupleCursor:
        """
        get cursor, load age extension and set search path
        """
        try:
            import psycopg2.extras
        except ImportError as e:
            raise ImportError(
                "Unable to import psycopg2, please install with "
                "`pip install -U psycopg2`."
) from e cursor = self.connection.cursor(cursor_factory=psycopg2.extras.NamedTupleCursor) cursor.execute("""LOAD 'age';""") cursor.execute("""SET search_path = ag_catalog, "$user", public;""") return cursor def _get_labels(self) -> Tuple[List[str], List[str]]: """ Get all labels of a graph (for both edges and vertices) by querying the graph metadata table directly Returns Tuple[List[str]]: 2 lists, the first containing vertex labels and the second containing edge labels """ e_labels_records = self.query( """MATCH ()-[e]-() RETURN collect(distinct label(e)) as labels""" ) e_labels = e_labels_records[0]["labels"] if e_labels_records else [] n_labels_records = self.query( """MATCH (n) RETURN collect(distinct label(n)) as labels""" ) n_labels = n_labels_records[0]["labels"] if n_labels_records else [] return n_labels, e_labels def _get_triples(self, e_labels: List[str]) -> List[Dict[str, str]]: """ Get a set of distinct relationship types (as a list of dicts) in the graph to be used as context by an llm. Args: e_labels (List[str]): a list of edge labels to filter for Returns: List[Dict[str, str]]: relationships as a list of dicts in the format "{'start':<from_label>, 'type':<edge_label>, 'end':<from_label>}" """ # age query to get distinct relationship types try: import psycopg2 except ImportError as e: raise ImportError( "Unable to import psycopg2, please install with " "`pip install -U psycopg2`." ) from e triple_query = """ SELECT * FROM ag_catalog.cypher('{graph_name}', $$ MATCH (a)-[e:`{e_label}`]->(b) WITH a,e,b LIMIT 3000 RETURN DISTINCT labels(a) AS from, type(e) AS edge, labels(b) AS to LIMIT 10 $$) AS (f agtype, edge agtype, t agtype); """ triple_schema = [] # iterate desired edge types and add distinct relationship types to result with self._get_cursor() as curs: for label in e_labels: q = triple_query.format(graph_name=self.graph_name, e_label=label) try: curs.execute(q) data = curs.fetchall() for d in data: # use json.loads to convert returned # strings to python primitives triple_schema.append( { "start": json.loads(d.f)[0], "type": json.loads(d.edge), "end": json.loads(d.t)[0], } ) except psycopg2.Error as e: raise AGEQueryException( { "message": "Error fetching triples", "detail": str(e), } ) return triple_schema def _get_triples_str(self, e_labels: List[str]) -> List[str]: """ Get a set of distinct relationship types (as a list of strings) in the graph to be used as context by an llm. 
Args: e_labels (List[str]): a list of edge labels to filter for Returns: List[str]: relationships as a list of strings in the format "(:`<from_label>`)-[:`<edge_label>`]->(:`<to_label>`)" """ triples = self._get_triples(e_labels) return self._format_triples(triples) @staticmethod def _format_triples(triples: List[Dict[str, str]]) -> List[str]: """ Convert a list of relationships from dictionaries to formatted strings to be better readable by an llm Args: triples (List[Dict[str,str]]): a list relationships in the form {'start':<from_label>, 'type':<edge_label>, 'end':<from_label>} Returns: List[str]: a list of relationships in the form "(:`<from_label>`)-[:`<edge_label>`]->(:`<to_label>`)" """ triple_template = "(:`{start}`)-[:`{type}`]->(:`{end}`)" triple_schema = [triple_template.format(**triple) for triple in triples] return triple_schema def _get_node_properties(self, n_labels: List[str]) -> List[Dict[str, Any]]: """ Fetch a list of available node properties by node label to be used as context for an llm Args: n_labels (List[str]): a list of node labels to filter for Returns: List[Dict[str, Any]]: a list of node labels and their corresponding properties in the form "{ 'labels': <node_label>, 'properties': [ { 'property': <property_name>, 'type': <property_type> },... ] }" """ try: import psycopg2 except ImportError as e: raise ImportError( "Unable to import psycopg2, please install with " "`pip install -U psycopg2`." ) from e # cypher query to fetch properties of a given label node_properties_query = """ SELECT * FROM ag_catalog.cypher('{graph_name}', $$ MATCH (a:`{n_label}`) RETURN properties(a) AS props LIMIT 100 $$) AS (props agtype); """ node_properties = [] with self._get_cursor() as curs: for label in n_labels: q = node_properties_query.format( graph_name=self.graph_name, n_label=label ) try: curs.execute(q) except psycopg2.Error as e: raise AGEQueryException( { "message": "Error fetching node properties", "detail": str(e), } ) data = curs.fetchall() # build a set of distinct properties s = set({}) for d in data: # use json.loads to convert to python # primitive and get readable type for k, v in json.loads(d.props).items(): s.add((k, self.types[type(v).__name__])) np = { "properties": [{"property": k, "type": v} for k, v in s], "labels": label, } node_properties.append(np) return node_properties def _get_edge_properties(self, e_labels: List[str]) -> List[Dict[str, Any]]: """ Fetch a list of available edge properties by edge label to be used as context for an llm Args: e_labels (List[str]): a list of edge labels to filter for Returns: List[Dict[str, Any]]: a list of edge labels and their corresponding properties in the form "{ 'labels': <edge_label>, 'properties': [ { 'property': <property_name>, 'type': <property_type> },... ] }" """ try: import psycopg2 except ImportError as e: raise ImportError( "Unable to import psycopg2, please install with " "`pip install -U psycopg2`." 
) from e # cypher query to fetch properties of a given label edge_properties_query = """ SELECT * FROM ag_catalog.cypher('{graph_name}', $$ MATCH ()-[e:`{e_label}`]->() RETURN properties(e) AS props LIMIT 100 $$) AS (props agtype); """ edge_properties = [] with self._get_cursor() as curs: for label in e_labels: q = edge_properties_query.format( graph_name=self.graph_name, e_label=label ) try: curs.execute(q) except psycopg2.Error as e: raise AGEQueryException( { "message": "Error fetching edge properties", "detail": str(e), } ) data = curs.fetchall() # build a set of distinct properties s = set({}) for d in data: # use json.loads to convert to python # primitive and get readable type for k, v in json.loads(d.props).items(): s.add((k, self.types[type(v).__name__])) np = { "properties": [{"property": k, "type": v} for k, v in s], "type": label, } edge_properties.append(np) return edge_properties def refresh_schema(self) -> None: """ Refresh the graph schema information by updating the available labels, relationships, and properties """ # fetch graph schema information n_labels, e_labels = self._get_labels() triple_schema = self._get_triples(e_labels) node_properties = self._get_node_properties(n_labels) edge_properties = self._get_edge_properties(e_labels) # update the formatted string representation self.schema = f""" Node properties are the following: {node_properties} Relationship properties are the following: {edge_properties} The relationships are the following: {self._format_triples(triple_schema)} """ # update the dictionary representation self.structured_schema = { "node_props": {el["labels"]: el["properties"] for el in node_properties}, "rel_props": {el["type"]: el["properties"] for el in edge_properties}, "relationships": triple_schema, "metadata": {}, } @property def get_schema(self) -> str: """Returns the schema of the Graph""" return self.schema @property def get_structured_schema(self) -> Dict[str, Any]: """Returns the structured schema of the Graph""" return self.structured_schema @staticmethod def _get_col_name(field: str, idx: int) -> str: """ Convert a cypher return field to a pgsql select field If possible keep the cypher column name, but create a generic name if necessary Args: field (str): a return field from a cypher query to be formatted for pgsql idx (int): the position of the field in the return statement Returns: str: the field to be used in the pgsql select statement """ # remove white space field = field.strip() # if an alias is provided for the field, use it if " as " in field: return field.split(" as ")[-1].strip() # if the return value is an unnamed primitive, give it a generic name elif field.isnumeric() or field in ("true", "false", "null"): return f"column_{idx}" # otherwise return the value stripping out some common special chars else: return field.replace("(", "_").replace(")", "") @staticmethod def _wrap_query(query: str, graph_name: str) -> str: """ Convert a cypher query to an Apache Age compatible sql query by wrapping the cypher query in ag_catalog.cypher, casting results to agtype and building a select statement Args: query (str): a valid cypher query graph_name (str): the name of the graph to query Returns: str: an equivalent pgsql query """ # pgsql template template = """SELECT {projection} FROM ag_catalog.cypher('{graph_name}', $$ {query} $$) AS ({fields});""" # if there are any returned fields they must be added to the pgsql query if "return" in query.lower(): # parse return statement to identify returned fields fields = ( query.lower() 
.split("return")[-1] .split("distinct")[-1] .split("order by")[0] .split("skip")[0] .split("limit")[0] .split(",") ) # raise exception if RETURN * is found as we can't resolve the fields if "*" in [x.strip() for x in fields]: raise ValueError( "AGE graph does not support 'RETURN *'" + " statements in Cypher queries" ) # get pgsql formatted field names fields = [ AGEGraph._get_col_name(field, idx) for idx, field in enumerate(fields) ] # build resulting pgsql relation fields_str = ", ".join( [field.split(".")[-1] + " agtype" for field in fields] ) # if no return statement we still need to return a single field of type agtype else: fields_str = "a agtype" select_str = "*" return template.format( graph_name=graph_name, query=query, fields=fields_str, projection=select_str, ) @staticmethod def _record_to_dict(record: NamedTuple) -> Dict[str, Any]: """ Convert a record returned from an age query to a dictionary Args: record (): a record from an age query result Returns: Dict[str, Any]: a dictionary representation of the record where the dictionary key is the field name and the value is the value converted to a python type """ # result holder d = {} # prebuild a mapping of vertex_id to vertex mappings to be used # later to build edges vertices = {} for k in record._fields: v = getattr(record, k) # agtype comes back '{key: value}::type' which must be parsed if isinstance(v, str) and "::" in v: dtype = v.split("::")[-1] v = v.split("::")[0] if dtype == "vertex": vertex = json.loads(v) vertices[vertex["id"]] = vertex.get("properties") # iterate returned fields and parse appropriately for k in record._fields: v = getattr(record, k) if isinstance(v, str) and "::" in v: dtype = v.split("::")[-1] v = v.split("::")[0] else: dtype = "" if dtype == "vertex": d[k] = json.loads(v).get("properties") # convert edge from id-label->id by replacing id with node information # we only do this if the vertex was also returned in the query # this is an attempt to be consistent with neo4j implementation elif dtype == "edge": edge = json.loads(v) d[k] = ( vertices.get(edge["start_id"], {}), edge["label"], vertices.get(edge["end_id"], {}), ) else: d[k] = json.loads(v) if isinstance(v, str) else v return d def query(self, query: str, params: dict = {}) -> List[Dict[str, Any]]: """ Query the graph by taking a cypher query, converting it to an age compatible query, executing it and converting the result Args: query (str): a cypher query to be executed params (dict): parameters for the query (not used in this implementation) Returns: List[Dict[str, Any]]: a list of dictionaries containing the result set """ try: import psycopg2 except ImportError as e: raise ImportError( "Unable to import psycopg2, please install with " "`pip install -U psycopg2`." ) from e # convert cypher query to pgsql/age query wrapped_query = self._wrap_query(query, self.graph_name) # execute the query, rolling back on an error with self._get_cursor() as curs: try: curs.execute(wrapped_query) self.connection.commit() except psycopg2.Error as e: self.connection.rollback() raise AGEQueryException( { "message": "Error executing graph query: {}".format(query), "detail": str(e), } ) data = curs.fetchall() if data is None: result = [] # convert to dictionaries else: result = [self._record_to_dict(d) for d in data] return result @staticmethod def _format_properties( properties: Dict[str, Any], id: Union[str, None] = None ) -> str: """ Convert a dictionary of properties to a string representation that can be used in a cypher query insert/merge statement. 
Args: properties (Dict[str,str]): a dictionary containing node/edge properties id (Union[str, None]): the id of the node or None if none exists Returns: str: the properties dictionary as a properly formatted string """ props = [] # wrap property key in backticks to escape for k, v in properties.items(): prop = f"`{k}`: {json.dumps(v)}" props.append(prop) if id is not None and "id" not in properties: props.append( f"id: {json.dumps(id)}" if isinstance(id, str) else f"id: {id}" ) return "{" + ", ".join(props) + "}" @staticmethod def clean_graph_labels(label: str) -> str: """ remove any disallowed characters from a label and replace with '_' Args: label (str): the original label Returns: str: the sanitized version of the label """ return re.sub(AGEGraph.label_regex, "_", label) def add_graph_documents( self, graph_documents: List[GraphDocument], include_source: bool = False ) -> None: """ insert a list of graph documents into the graph Args: graph_documents (List[GraphDocument]): the list of documents to be inserted include_source (bool): if True add nodes for the sources with MENTIONS edges to the entities they mention Returns: None """ # query for inserting nodes node_insert_query = ( """ MERGE (n:`{label}` {properties}) """ if not include_source else """ MERGE (n:`{label}` {properties}) MERGE (d:Document {d_properties}) MERGE (d)-[:MENTIONS]->(n) """ ) # query for inserting edges edge_insert_query = """ MERGE (from:`{f_label}` {f_properties}) MERGE (to:`{t_label}` {t_properties}) MERGE (from)-[:`{r_label}` {r_properties}]->(to) """ # iterate docs and insert them for doc in graph_documents: # if we are adding sources, create an id for the source if include_source: if not doc.source.metadata.get("id"): doc.source.metadata["id"] = md5( doc.source.page_content.encode("utf-8") ).hexdigest() # insert entity nodes for node in doc.nodes: node.properties["id"] = node.id if include_source: query = node_insert_query.format( label=node.type, properties=self._format_properties(node.properties), d_properties=self._format_properties(doc.source.metadata), ) else: query = node_insert_query.format( label=AGEGraph.clean_graph_labels(node.type), properties=self._format_properties(node.properties), ) self.query(query) # insert relationships for edge in doc.relationships: edge.source.properties["id"] = edge.source.id edge.target.properties["id"] = edge.target.id inputs = { "f_label": AGEGraph.clean_graph_labels(edge.source.type), "f_properties": self._format_properties(edge.source.properties), "t_label": AGEGraph.clean_graph_labels(edge.target.type), "t_properties": self._format_properties(edge.target.properties), "r_label": AGEGraph.clean_graph_labels(edge.type).upper(), "r_properties": self._format_properties(edge.properties), } query = edge_insert_query.format(**inputs) self.query(query)
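# Minimal usage sketch for the static helpers above; it needs no database
# connection, so the assertions are grounded directly in the code shown.
# (Instantiating AGEGraph itself requires the psycopg2 connection setup
# defined earlier in this module.)
if __name__ == "__main__":
    # _wrap_query turns a Cypher query into an ag_catalog.cypher() SQL call.
    print(AGEGraph._wrap_query("MATCH (p:Person) RETURN p.name as name", "demo"))
    # _get_col_name keeps an explicit alias, otherwise sanitizes the field.
    assert AGEGraph._get_col_name("p.name as name", 0) == "name"
    assert AGEGraph._get_col_name("count(p)", 1) == "count_p"
    # _format_properties renders a Cypher property map, appending the id
    # when one is supplied and not already present in the dict.
    assert AGEGraph._format_properties({"name": "Alice"}, id="a1") == (
        '{`name`: "Alice", id: "a1"}'
    )
    # clean_graph_labels substitutes characters disallowed by label_regex
    # (defined earlier in this module) with underscores.
    print(AGEGraph.clean_graph_labels("has friend"))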
0
lc_public_repos/langchain/libs/community/langchain_community
lc_public_repos/langchain/libs/community/langchain_community/graphs/arangodb_graph.py
import os from math import ceil from typing import Any, Dict, List, Optional class ArangoGraph: """ArangoDB wrapper for graph operations. *Security note*: Make sure that the database connection uses credentials that are narrowly-scoped to only include necessary permissions. Failure to do so may result in data corruption or loss, since the calling code may attempt commands that would result in deletion, mutation of data if appropriately prompted or reading sensitive data if such data is present in the database. The best way to guard against such negative outcomes is to (as appropriate) limit the permissions granted to the credentials used with this tool. See https://python.langchain.com/docs/security for more information. """ def __init__(self, db: Any) -> None: """Create a new ArangoDB graph wrapper instance.""" self.set_db(db) self.set_schema() @property def db(self) -> Any: return self.__db @property def schema(self) -> Dict[str, Any]: return self.__schema def set_db(self, db: Any) -> None: from arango.database import Database if not isinstance(db, Database): msg = "**db** parameter must inherit from arango.database.Database" raise TypeError(msg) self.__db: Database = db self.set_schema() def set_schema(self, schema: Optional[Dict[str, Any]] = None) -> None: """ Set the schema of the ArangoDB Database. Auto-generates Schema if **schema** is None. """ self.__schema = self.generate_schema() if schema is None else schema def generate_schema( self, sample_ratio: float = 0 ) -> Dict[str, List[Dict[str, Any]]]: """ Generates the schema of the ArangoDB Database and returns it User can specify a **sample_ratio** (0 to 1) to determine the ratio of documents/edges used (in relation to the Collection size) to render each Collection Schema. """ if not 0 <= sample_ratio <= 1: raise ValueError("**sample_ratio** value must be in between 0 to 1") # Stores the Edge Relationships between each ArangoDB Document Collection graph_schema: List[Dict[str, Any]] = [ {"graph_name": g["name"], "edge_definitions": g["edge_definitions"]} for g in self.db.graphs() ] # Stores the schema of every ArangoDB Document/Edge collection collection_schema: List[Dict[str, Any]] = [] for collection in self.db.collections(): if collection["system"]: continue # Extract collection name, type, and size col_name: str = collection["name"] col_type: str = collection["type"] col_size: int = self.db.collection(col_name).count() # Skip collection if empty if col_size == 0: continue # Set number of ArangoDB documents/edges to retrieve limit_amount = ceil(sample_ratio * col_size) or 1 aql = f""" FOR doc in {col_name} LIMIT {limit_amount} RETURN doc """ doc: Dict[str, Any] properties: List[Dict[str, str]] = [] for doc in self.__db.aql.execute(aql): for key, value in doc.items(): properties.append({"name": key, "type": type(value).__name__}) collection_schema.append( { "collection_name": col_name, "collection_type": col_type, f"{col_type}_properties": properties, f"example_{col_type}": doc, } ) return {"Graph Schema": graph_schema, "Collection Schema": collection_schema} def query( self, query: str, top_k: Optional[int] = None, **kwargs: Any ) -> List[Dict[str, Any]]: """Query the ArangoDB database.""" import itertools cursor = self.__db.aql.execute(query, **kwargs) return [doc for doc in itertools.islice(cursor, top_k)] @classmethod def from_db_credentials( cls, url: Optional[str] = None, dbname: Optional[str] = None, username: Optional[str] = None, password: Optional[str] = None, ) -> Any: """Convenience constructor that builds Arango DB 
from credentials.

        Args:
            url: Arango DB url. Can be passed in as named arg or set as environment
                var ``ARANGODB_URL``. Defaults to "http://localhost:8529".
            dbname: Arango DB name. Can be passed in as named arg or set as
                environment var ``ARANGODB_DBNAME``. Defaults to "_system".
            username: Can be passed in as named arg or set as environment var
                ``ARANGODB_USERNAME``. Defaults to "root".
            password: Can be passed in as named arg or set as environment var
                ``ARANGODB_PASSWORD``. Defaults to "".

        Returns:
            An arango.database.StandardDatabase.
        """
        db = get_arangodb_client(
            url=url, dbname=dbname, username=username, password=password
        )
        return cls(db)


def get_arangodb_client(
    url: Optional[str] = None,
    dbname: Optional[str] = None,
    username: Optional[str] = None,
    password: Optional[str] = None,
) -> Any:
    """Get the Arango DB client from credentials.

    Args:
        url: Arango DB url. Can be passed in as named arg or set as environment
            var ``ARANGODB_URL``. Defaults to "http://localhost:8529".
        dbname: Arango DB name. Can be passed in as named arg or set as
            environment var ``ARANGODB_DBNAME``. Defaults to "_system".
        username: Can be passed in as named arg or set as environment var
            ``ARANGODB_USERNAME``. Defaults to "root".
        password: Can be passed in as named arg or set as environment var
            ``ARANGODB_PASSWORD``. Defaults to "".

    Returns:
        An arango.database.StandardDatabase.
    """
    try:
        from arango import ArangoClient
    except ImportError as e:
        raise ImportError(
            "Unable to import arango, please install with `pip install python-arango`."
        ) from e

    _url: str = url or os.environ.get("ARANGODB_URL", "http://localhost:8529")  # type: ignore[assignment]
    _dbname: str = dbname or os.environ.get("ARANGODB_DBNAME", "_system")  # type: ignore[assignment]
    _username: str = username or os.environ.get("ARANGODB_USERNAME", "root")  # type: ignore[assignment]
    _password: str = password or os.environ.get("ARANGODB_PASSWORD", "")  # type: ignore[assignment]

    return ArangoClient(_url).db(_dbname, _username, _password, verify=True)
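# Minimal usage sketch: assumes a reachable ArangoDB server; the URL and
# collection name below are placeholders, and credentials fall back to the
# ARANGODB_* environment variables documented above.
if __name__ == "__main__":
    # Build the client explicitly ...
    db = get_arangodb_client(url="http://localhost:8529", dbname="_system")
    graph = ArangoGraph(db)
    # ... or let the convenience constructor do both steps.
    graph = ArangoGraph.from_db_credentials(url="http://localhost:8529")
    print(graph.schema["Graph Schema"])
    # Extra kwargs such as bind_vars are forwarded to python-arango's
    # aql.execute(); "my_collection" is a placeholder collection name.
    rows = graph.query(
        "FOR doc IN @@col LIMIT 5 RETURN doc",
        top_k=5,
        bind_vars={"@col": "my_collection"},
    )
    print(rows)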
0
lc_public_repos/langchain/libs/community/langchain_community
lc_public_repos/langchain/libs/community/langchain_community/graphs/memgraph_graph.py
from langchain_community.graphs.neo4j_graph import Neo4jGraph SCHEMA_QUERY = """ CALL llm_util.schema("raw") YIELD * RETURN * """ class MemgraphGraph(Neo4jGraph): """Memgraph wrapper for graph operations. *Security note*: Make sure that the database connection uses credentials that are narrowly-scoped to only include necessary permissions. Failure to do so may result in data corruption or loss, since the calling code may attempt commands that would result in deletion, mutation of data if appropriately prompted or reading sensitive data if such data is present in the database. The best way to guard against such negative outcomes is to (as appropriate) limit the permissions granted to the credentials used with this tool. See https://python.langchain.com/docs/security for more information. """ def __init__( self, url: str, username: str, password: str, *, database: str = "memgraph" ) -> None: """Create a new Memgraph graph wrapper instance.""" super().__init__(url, username, password, database=database) def refresh_schema(self) -> None: """ Refreshes the Memgraph graph schema information. """ db_structured_schema = self.query(SCHEMA_QUERY)[0].get("schema") assert db_structured_schema is not None self.structured_schema = db_structured_schema # Format node properties formatted_node_props = [] for node_name, properties in db_structured_schema["node_props"].items(): formatted_node_props.append( f"Node name: '{node_name}', Node properties: {properties}" ) # Format relationship properties formatted_rel_props = [] for rel_name, properties in db_structured_schema["rel_props"].items(): formatted_rel_props.append( f"Relationship name: '{rel_name}', " f"Relationship properties: {properties}" ) # Format relationships formatted_rels = [ f"(:{rel['start']})-[:{rel['type']}]->(:{rel['end']})" for rel in db_structured_schema["relationships"] ] self.schema = "\n".join( [ "Node properties are the following:", *formatted_node_props, "Relationship properties are the following:", *formatted_rel_props, "The relationships are the following:", *formatted_rels, ] )
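# Minimal usage sketch: Memgraph speaks the Bolt protocol, so construction
# mirrors Neo4jGraph; the URL below is a placeholder and the empty credentials
# assume auth is disabled. SCHEMA_QUERY relies on the MAGE llm_util module
# being available in the Memgraph instance.
if __name__ == "__main__":
    graph = MemgraphGraph(url="bolt://localhost:7687", username="", password="")
    print(graph.get_schema)          # formatted string for prompting
    print(graph.structured_schema)   # raw dict from llm_util.schema("raw")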
0
lc_public_repos/langchain/libs/community/langchain_community
lc_public_repos/langchain/libs/community/langchain_community/graphs/__init__.py
"""**Graphs** provide a natural language interface to graph databases.""" import importlib from typing import TYPE_CHECKING, Any if TYPE_CHECKING: from langchain_community.graphs.arangodb_graph import ( ArangoGraph, ) from langchain_community.graphs.falkordb_graph import ( FalkorDBGraph, ) from langchain_community.graphs.gremlin_graph import ( GremlinGraph, ) from langchain_community.graphs.hugegraph import ( HugeGraph, ) from langchain_community.graphs.kuzu_graph import ( KuzuGraph, ) from langchain_community.graphs.memgraph_graph import ( MemgraphGraph, ) from langchain_community.graphs.nebula_graph import ( NebulaGraph, ) from langchain_community.graphs.neo4j_graph import ( Neo4jGraph, ) from langchain_community.graphs.neptune_graph import ( BaseNeptuneGraph, NeptuneAnalyticsGraph, NeptuneGraph, ) from langchain_community.graphs.neptune_rdf_graph import ( NeptuneRdfGraph, ) from langchain_community.graphs.networkx_graph import ( NetworkxEntityGraph, ) from langchain_community.graphs.ontotext_graphdb_graph import ( OntotextGraphDBGraph, ) from langchain_community.graphs.rdf_graph import ( RdfGraph, ) from langchain_community.graphs.tigergraph_graph import ( TigerGraph, ) __all__ = [ "ArangoGraph", "FalkorDBGraph", "GremlinGraph", "HugeGraph", "KuzuGraph", "BaseNeptuneGraph", "MemgraphGraph", "NebulaGraph", "Neo4jGraph", "NeptuneGraph", "NeptuneRdfGraph", "NeptuneAnalyticsGraph", "NetworkxEntityGraph", "OntotextGraphDBGraph", "RdfGraph", "TigerGraph", ] _module_lookup = { "ArangoGraph": "langchain_community.graphs.arangodb_graph", "FalkorDBGraph": "langchain_community.graphs.falkordb_graph", "GremlinGraph": "langchain_community.graphs.gremlin_graph", "HugeGraph": "langchain_community.graphs.hugegraph", "KuzuGraph": "langchain_community.graphs.kuzu_graph", "MemgraphGraph": "langchain_community.graphs.memgraph_graph", "NebulaGraph": "langchain_community.graphs.nebula_graph", "Neo4jGraph": "langchain_community.graphs.neo4j_graph", "BaseNeptuneGraph": "langchain_community.graphs.neptune_graph", "NeptuneAnalyticsGraph": "langchain_community.graphs.neptune_graph", "NeptuneGraph": "langchain_community.graphs.neptune_graph", "NeptuneRdfGraph": "langchain_community.graphs.neptune_rdf_graph", "NetworkxEntityGraph": "langchain_community.graphs.networkx_graph", "OntotextGraphDBGraph": "langchain_community.graphs.ontotext_graphdb_graph", "RdfGraph": "langchain_community.graphs.rdf_graph", "TigerGraph": "langchain_community.graphs.tigergraph_graph", } def __getattr__(name: str) -> Any: if name in _module_lookup: module = importlib.import_module(_module_lookup[name]) return getattr(module, name) raise AttributeError(f"module {__name__} has no attribute {name}")
0
lc_public_repos/langchain/libs/community/langchain_community
lc_public_repos/langchain/libs/community/langchain_community/graphs/neptune_graph.py
import json from abc import ABC, abstractmethod from typing import Any, Dict, List, Optional, Tuple, Union class NeptuneQueryException(Exception): """Exception for the Neptune queries.""" def __init__(self, exception: Union[str, Dict]): if isinstance(exception, dict): self.message = exception["message"] if "message" in exception else "unknown" self.details = exception["details"] if "details" in exception else "unknown" else: self.message = exception self.details = "unknown" def get_message(self) -> str: return self.message def get_details(self) -> Any: return self.details class BaseNeptuneGraph(ABC): """Abstract base class for Neptune.""" @property def get_schema(self) -> str: """Return the schema of the Neptune database""" return self.schema @abstractmethod def query(self, query: str, params: dict = {}) -> dict: raise NotImplementedError() @abstractmethod def _get_summary(self) -> Dict: raise NotImplementedError() def _get_labels(self) -> Tuple[List[str], List[str]]: """Get node and edge labels from the Neptune statistics summary""" summary = self._get_summary() n_labels = summary["nodeLabels"] e_labels = summary["edgeLabels"] return n_labels, e_labels def _get_triples(self, e_labels: List[str]) -> List[str]: triple_query = """ MATCH (a)-[e:`{e_label}`]->(b) WITH a,e,b LIMIT 3000 RETURN DISTINCT labels(a) AS from, type(e) AS edge, labels(b) AS to LIMIT 10 """ triple_template = "(:`{a}`)-[:`{e}`]->(:`{b}`)" triple_schema = [] for label in e_labels: q = triple_query.format(e_label=label) data = self.query(q) for d in data: triple = triple_template.format( a=d["from"][0], e=d["edge"], b=d["to"][0] ) triple_schema.append(triple) return triple_schema def _get_node_properties(self, n_labels: List[str], types: Dict) -> List: node_properties_query = """ MATCH (a:`{n_label}`) RETURN properties(a) AS props LIMIT 100 """ node_properties = [] for label in n_labels: q = node_properties_query.format(n_label=label) data = {"label": label, "properties": self.query(q)} s = set({}) for p in data["properties"]: for k, v in p["props"].items(): s.add((k, types[type(v).__name__])) np = { "properties": [{"property": k, "type": v} for k, v in s], "labels": label, } node_properties.append(np) return node_properties def _get_edge_properties(self, e_labels: List[str], types: Dict[str, Any]) -> List: edge_properties_query = """ MATCH ()-[e:`{e_label}`]->() RETURN properties(e) AS props LIMIT 100 """ edge_properties = [] for label in e_labels: q = edge_properties_query.format(e_label=label) data = {"label": label, "properties": self.query(q)} s = set({}) for p in data["properties"]: for k, v in p["props"].items(): s.add((k, types[type(v).__name__])) ep = { "type": label, "properties": [{"property": k, "type": v} for k, v in s], } edge_properties.append(ep) return edge_properties def _refresh_schema(self) -> None: """ Refreshes the Neptune graph schema information. """ types = { "str": "STRING", "float": "DOUBLE", "int": "INTEGER", "list": "LIST", "dict": "MAP", "bool": "BOOLEAN", } n_labels, e_labels = self._get_labels() triple_schema = self._get_triples(e_labels) node_properties = self._get_node_properties(n_labels, types) edge_properties = self._get_edge_properties(e_labels, types) self.schema = f""" Node properties are the following: {node_properties} Relationship properties are the following: {edge_properties} The relationships are the following: {triple_schema} """ class NeptuneAnalyticsGraph(BaseNeptuneGraph): """Neptune Analytics wrapper for graph operations. 
Parameters: client: optional boto3 Neptune client credentials_profile_name: optional AWS profile name region_name: optional AWS region, e.g., us-west-2 graph_identifier: the graph identifier for a Neptune Analytics graph Example: .. code-block:: python graph = NeptuneAnalyticsGraph( graph_identifier='<my-graph-id>' ) *Security note*: Make sure that the database connection uses credentials that are narrowly-scoped to only include necessary permissions. Failure to do so may result in data corruption or loss, since the calling code may attempt commands that would result in deletion, mutation of data if appropriately prompted or reading sensitive data if such data is present in the database. The best way to guard against such negative outcomes is to (as appropriate) limit the permissions granted to the credentials used with this tool. See https://python.langchain.com/docs/security for more information. """ def __init__( self, graph_identifier: str, client: Any = None, credentials_profile_name: Optional[str] = None, region_name: Optional[str] = None, ) -> None: """Create a new Neptune Analytics graph wrapper instance.""" try: if client is not None: self.client = client else: import boto3 if credentials_profile_name is not None: session = boto3.Session(profile_name=credentials_profile_name) else: # use default credentials session = boto3.Session() self.graph_identifier = graph_identifier if region_name: self.client = session.client( "neptune-graph", region_name=region_name ) else: self.client = session.client("neptune-graph") except ImportError: raise ImportError( "Could not import boto3 python package. " "Please install it with `pip install boto3`." ) except Exception as e: if type(e).__name__ == "UnknownServiceError": raise ImportError( "NeptuneGraph requires a boto3 version 1.34.40 or greater." "Please install it with `pip install -U boto3`." ) from e else: raise ValueError( "Could not load credentials to authenticate with AWS client. " "Please check that credentials in the specified " "profile name are valid." ) from e try: self._refresh_schema() except Exception as e: raise NeptuneQueryException( { "message": "Could not get schema for Neptune database", "detail": str(e), } ) def query(self, query: str, params: dict = {}) -> Dict[str, Any]: """Query Neptune database.""" try: resp = self.client.execute_query( graphIdentifier=self.graph_identifier, queryString=query, parameters=params, language="OPEN_CYPHER", ) return json.loads(resp["payload"].read().decode("UTF-8"))["results"] except Exception as e: raise NeptuneQueryException( { "message": "An error occurred while executing the query.", "details": str(e), } ) def _get_summary(self) -> Dict: try: response = self.client.get_graph_summary( graphIdentifier=self.graph_identifier, mode="detailed" ) except Exception as e: raise NeptuneQueryException( { "message": ("Summary API error occurred on Neptune Analytics"), "details": str(e), } ) try: summary = response["graphSummary"] except Exception: raise NeptuneQueryException( { "message": "Summary API did not return a valid response.", "details": response.content.decode(), } ) else: return summary class NeptuneGraph(BaseNeptuneGraph): """Neptune wrapper for graph operations. 
Parameters: host: endpoint for the database instance port: port number for the database instance, default is 8182 use_https: whether to use secure connection, default is True client: optional boto3 Neptune client credentials_profile_name: optional AWS profile name region_name: optional AWS region, e.g., us-west-2 sign: optional, whether to sign the request payload, default is True Example: .. code-block:: python graph = NeptuneGraph( host='<my-cluster>', port=8182 ) *Security note*: Make sure that the database connection uses credentials that are narrowly-scoped to only include necessary permissions. Failure to do so may result in data corruption or loss, since the calling code may attempt commands that would result in deletion, mutation of data if appropriately prompted or reading sensitive data if such data is present in the database. The best way to guard against such negative outcomes is to (as appropriate) limit the permissions granted to the credentials used with this tool. See https://python.langchain.com/docs/security for more information. """ def __init__( self, host: str, port: int = 8182, use_https: bool = True, client: Any = None, credentials_profile_name: Optional[str] = None, region_name: Optional[str] = None, sign: bool = True, ) -> None: """Create a new Neptune graph wrapper instance.""" try: if client is not None: self.client = client else: import boto3 if credentials_profile_name is not None: session = boto3.Session(profile_name=credentials_profile_name) else: # use default credentials session = boto3.Session() client_params = {} if region_name: client_params["region_name"] = region_name protocol = "https" if use_https else "http" client_params["endpoint_url"] = f"{protocol}://{host}:{port}" if sign: self.client = session.client("neptunedata", **client_params) else: from botocore import UNSIGNED from botocore.config import Config self.client = session.client( "neptunedata", **client_params, config=Config(signature_version=UNSIGNED), ) except ImportError: raise ImportError( "Could not import boto3 python package. " "Please install it with `pip install boto3`." ) except Exception as e: if type(e).__name__ == "UnknownServiceError": raise ImportError( "NeptuneGraph requires a boto3 version 1.28.38 or greater." "Please install it with `pip install -U boto3`." ) from e else: raise ValueError( "Could not load credentials to authenticate with AWS client. " "Please check that credentials in the specified " "profile name are valid." ) from e try: self._refresh_schema() except Exception as e: raise NeptuneQueryException( { "message": "Could not get schema for Neptune database", "detail": str(e), } ) def query(self, query: str, params: dict = {}) -> Dict[str, Any]: """Query Neptune database.""" try: return self.client.execute_open_cypher_query(openCypherQuery=query)[ "results" ] except Exception as e: raise NeptuneQueryException( { "message": "An error occurred while executing the query.", "details": str(e), } ) def _get_summary(self) -> Dict: try: response = self.client.get_propertygraph_summary() except Exception as e: raise NeptuneQueryException( { "message": ( "Summary API is not available for this instance of Neptune," "ensure the engine version is >=1.2.1.0" ), "details": str(e), } ) try: summary = response["payload"]["graphSummary"] except Exception: raise NeptuneQueryException( { "message": "Summary API did not return a valid response.", "details": response.content.decode(), } ) else: return summary
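# Minimal usage sketch: assumes boto3 credentials are already configured; the
# endpoint, region, and graph identifier below are placeholders.
if __name__ == "__main__":
    # Neptune database cluster, reached through the "neptunedata" boto3 client:
    graph = NeptuneGraph(host="my-cluster.cluster-xyz.us-east-1.neptune.amazonaws.com")
    print(graph.get_schema)
    print(graph.query("MATCH (n) RETURN labels(n) AS labels LIMIT 5"))

    # Neptune Analytics graph, reached through the "neptune-graph" boto3 client:
    analytics = NeptuneAnalyticsGraph(
        graph_identifier="g-xxxxxxxxxx", region_name="us-east-1"
    )
    print(analytics.query("MATCH (n) RETURN count(n) AS node_count"))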
0
lc_public_repos/langchain/libs/community/langchain_community
lc_public_repos/langchain/libs/community/langchain_community/graphs/neo4j_graph.py
from hashlib import md5 from typing import Any, Dict, List, Optional from langchain_core._api.deprecation import deprecated from langchain_core.utils import get_from_dict_or_env from langchain_community.graphs.graph_document import GraphDocument from langchain_community.graphs.graph_store import GraphStore BASE_ENTITY_LABEL = "__Entity__" EXCLUDED_LABELS = ["_Bloom_Perspective_", "_Bloom_Scene_"] EXCLUDED_RELS = ["_Bloom_HAS_SCENE_"] EXHAUSTIVE_SEARCH_LIMIT = 10000 LIST_LIMIT = 128 # Threshold for returning all available prop values in graph schema DISTINCT_VALUE_LIMIT = 10 node_properties_query = """ CALL apoc.meta.data() YIELD label, other, elementType, type, property WHERE NOT type = "RELATIONSHIP" AND elementType = "node" AND NOT label IN $EXCLUDED_LABELS WITH label AS nodeLabels, collect({property:property, type:type}) AS properties RETURN {labels: nodeLabels, properties: properties} AS output """ rel_properties_query = """ CALL apoc.meta.data() YIELD label, other, elementType, type, property WHERE NOT type = "RELATIONSHIP" AND elementType = "relationship" AND NOT label in $EXCLUDED_LABELS WITH label AS nodeLabels, collect({property:property, type:type}) AS properties RETURN {type: nodeLabels, properties: properties} AS output """ rel_query = """ CALL apoc.meta.data() YIELD label, other, elementType, type, property WHERE type = "RELATIONSHIP" AND elementType = "node" UNWIND other AS other_node WITH * WHERE NOT label IN $EXCLUDED_LABELS AND NOT other_node IN $EXCLUDED_LABELS RETURN {start: label, type: property, end: toString(other_node)} AS output """ include_docs_query = ( "MERGE (d:Document {id:$document.metadata.id}) " "SET d.text = $document.page_content " "SET d += $document.metadata " "WITH d " ) @deprecated( since="0.3.8", removal="1.0", alternative_import="langchain_neo4j.graphs.neo4j_graph.clean_string_values", ) def clean_string_values(text: str) -> str: """Clean string values for schema. Cleans the input text by replacing newline and carriage return characters. Args: text (str): The input text to clean. Returns: str: The cleaned text. """ return text.replace("\n", " ").replace("\r", " ") @deprecated( since="0.3.8", removal="1.0", alternative_import="langchain_neo4j.graphs.neo4j_graph.value_sanitize", ) def value_sanitize(d: Any) -> Any: """Sanitize the input dictionary or list. Sanitizes the input by removing embedding-like values, lists with more than 128 elements, that are mostly irrelevant for generating answers in a LLM context. These properties, if left in results, can occupy significant context space and detract from the LLM's performance by introducing unnecessary noise and cost. Args: d (Any): The input dictionary or list to sanitize. Returns: Any: The sanitized dictionary or list. 
""" if isinstance(d, dict): new_dict = {} for key, value in d.items(): if isinstance(value, dict): sanitized_value = value_sanitize(value) if ( sanitized_value is not None ): # Check if the sanitized value is not None new_dict[key] = sanitized_value elif isinstance(value, list): if len(value) < LIST_LIMIT: sanitized_value = value_sanitize(value) if ( sanitized_value is not None ): # Check if the sanitized value is not None new_dict[key] = sanitized_value # Do not include the key if the list is oversized else: new_dict[key] = value return new_dict elif isinstance(d, list): if len(d) < LIST_LIMIT: return [ value_sanitize(item) for item in d if value_sanitize(item) is not None ] else: return None else: return d @deprecated( since="0.3.8", removal="1.0", alternative_import="langchain_neo4j.graphs.neo4j_graph._get_node_import_query", ) def _get_node_import_query(baseEntityLabel: bool, include_source: bool) -> str: if baseEntityLabel: return ( f"{include_docs_query if include_source else ''}" "UNWIND $data AS row " f"MERGE (source:`{BASE_ENTITY_LABEL}` {{id: row.id}}) " "SET source += row.properties " f"{'MERGE (d)-[:MENTIONS]->(source) ' if include_source else ''}" "WITH source, row " "CALL apoc.create.addLabels( source, [row.type] ) YIELD node " "RETURN distinct 'done' AS result" ) else: return ( f"{include_docs_query if include_source else ''}" "UNWIND $data AS row " "CALL apoc.merge.node([row.type], {id: row.id}, " "row.properties, {}) YIELD node " f"{'MERGE (d)-[:MENTIONS]->(node) ' if include_source else ''}" "RETURN distinct 'done' AS result" ) @deprecated( since="0.3.8", removal="1.0", alternative_import="langchain_neo4j.graphs.neo4j_graph._get_rel_import_query", ) def _get_rel_import_query(baseEntityLabel: bool) -> str: if baseEntityLabel: return ( "UNWIND $data AS row " f"MERGE (source:`{BASE_ENTITY_LABEL}` {{id: row.source}}) " f"MERGE (target:`{BASE_ENTITY_LABEL}` {{id: row.target}}) " "WITH source, target, row " "CALL apoc.merge.relationship(source, row.type, " "{}, row.properties, target) YIELD rel " "RETURN distinct 'done'" ) else: return ( "UNWIND $data AS row " "CALL apoc.merge.node([row.source_label], {id: row.source}," "{}, {}) YIELD node as source " "CALL apoc.merge.node([row.target_label], {id: row.target}," "{}, {}) YIELD node as target " "CALL apoc.merge.relationship(source, row.type, " "{}, row.properties, target) YIELD rel " "RETURN distinct 'done'" ) @deprecated( since="0.3.8", removal="1.0", alternative_import="langchain_neo4j.graphs.neo4j_graph._format_schema", ) def _format_schema(schema: Dict, is_enhanced: bool) -> str: formatted_node_props = [] formatted_rel_props = [] if is_enhanced: # Enhanced formatting for nodes for node_type, properties in schema["node_props"].items(): formatted_node_props.append(f"- **{node_type}**") for prop in properties: example = "" if prop["type"] == "STRING" and prop.get("values"): if prop.get("distinct_count", 11) > DISTINCT_VALUE_LIMIT: example = ( f'Example: "{clean_string_values(prop["values"][0])}"' if prop["values"] else "" ) else: # If less than 10 possible values return all example = ( ( "Available options: " f'{[clean_string_values(el) for el in prop["values"]]}' ) if prop["values"] else "" ) elif prop["type"] in [ "INTEGER", "FLOAT", "DATE", "DATE_TIME", "LOCAL_DATE_TIME", ]: if prop.get("min") is not None: example = f'Min: {prop["min"]}, Max: {prop["max"]}' else: example = ( f'Example: "{prop["values"][0]}"' if prop.get("values") else "" ) elif prop["type"] == "LIST": # Skip embeddings if not prop.get("min_size") or 
prop["min_size"] > LIST_LIMIT: continue example = ( f'Min Size: {prop["min_size"]}, Max Size: {prop["max_size"]}' ) formatted_node_props.append( f" - `{prop['property']}`: {prop['type']} {example}" ) # Enhanced formatting for relationships for rel_type, properties in schema["rel_props"].items(): formatted_rel_props.append(f"- **{rel_type}**") for prop in properties: example = "" if prop["type"] == "STRING": if prop.get("distinct_count", 11) > DISTINCT_VALUE_LIMIT: example = ( f'Example: "{clean_string_values(prop["values"][0])}"' if prop["values"] else "" ) else: # If less than 10 possible values return all example = ( ( "Available options: " f'{[clean_string_values(el) for el in prop["values"]]}' ) if prop["values"] else "" ) elif prop["type"] in [ "INTEGER", "FLOAT", "DATE", "DATE_TIME", "LOCAL_DATE_TIME", ]: if prop.get("min"): # If we have min/max example = f'Min: {prop["min"]}, Max: {prop["max"]}' else: # return a single value example = ( f'Example: "{prop["values"][0]}"' if prop["values"] else "" ) elif prop["type"] == "LIST": # Skip embeddings if not prop.get("min_size") or prop["min_size"] > LIST_LIMIT: continue example = ( f'Min Size: {prop["min_size"]}, Max Size: {prop["max_size"]}' ) formatted_rel_props.append( f" - `{prop['property']}: {prop['type']}` {example}" ) else: # Format node properties for label, props in schema["node_props"].items(): props_str = ", ".join( [f"{prop['property']}: {prop['type']}" for prop in props] ) formatted_node_props.append(f"{label} {{{props_str}}}") # Format relationship properties using structured_schema for type, props in schema["rel_props"].items(): props_str = ", ".join( [f"{prop['property']}: {prop['type']}" for prop in props] ) formatted_rel_props.append(f"{type} {{{props_str}}}") # Format relationships formatted_rels = [ f"(:{el['start']})-[:{el['type']}]->(:{el['end']})" for el in schema["relationships"] ] return "\n".join( [ "Node properties:", "\n".join(formatted_node_props), "Relationship properties:", "\n".join(formatted_rel_props), "The relationships:", "\n".join(formatted_rels), ] ) @deprecated( since="0.3.8", removal="1.0", alternative_import="langchain_neo4j.graphs.neo4j_graph._remove_backticks", ) def _remove_backticks(text: str) -> str: return text.replace("`", "") @deprecated( since="0.3.8", removal="1.0", alternative_import="langchain_neo4j.Neo4jGraph", ) class Neo4jGraph(GraphStore): """Neo4j database wrapper for various graph operations. Parameters: url (Optional[str]): The URL of the Neo4j database server. username (Optional[str]): The username for database authentication. password (Optional[str]): The password for database authentication. database (str): The name of the database to connect to. Default is 'neo4j'. timeout (Optional[float]): The timeout for transactions in seconds. Useful for terminating long-running queries. By default, there is no timeout set. sanitize (bool): A flag to indicate whether to remove lists with more than 128 elements from results. Useful for removing embedding-like properties from database responses. Default is False. refresh_schema (bool): A flag whether to refresh schema information at initialization. Default is True. enhanced_schema (bool): A flag whether to scan the database for example values and use them in the graph schema. Default is False. driver_config (Dict): Configuration passed to Neo4j Driver. *Security note*: Make sure that the database connection uses credentials that are narrowly-scoped to only include necessary permissions. 
Failure to do so may result in data corruption or loss, since the calling code may attempt commands that would result in deletion, mutation of data if appropriately prompted or reading sensitive data if such data is present in the database. The best way to guard against such negative outcomes is to (as appropriate) limit the permissions granted to the credentials used with this tool. See https://python.langchain.com/docs/security for more information. """ def __init__( self, url: Optional[str] = None, username: Optional[str] = None, password: Optional[str] = None, database: Optional[str] = None, timeout: Optional[float] = None, sanitize: bool = False, refresh_schema: bool = True, *, driver_config: Optional[Dict] = None, enhanced_schema: bool = False, ) -> None: """Create a new Neo4j graph wrapper instance.""" try: import neo4j except ImportError: raise ImportError( "Could not import neo4j python package. " "Please install it with `pip install neo4j`." ) url = get_from_dict_or_env({"url": url}, "url", "NEO4J_URI") # if username and password are "", assume Neo4j auth is disabled if username == "" and password == "": auth = None else: username = get_from_dict_or_env( {"username": username}, "username", "NEO4J_USERNAME", ) password = get_from_dict_or_env( {"password": password}, "password", "NEO4J_PASSWORD", ) auth = (username, password) database = get_from_dict_or_env( {"database": database}, "database", "NEO4J_DATABASE", "neo4j" ) self._driver = neo4j.GraphDatabase.driver( url, auth=auth, **(driver_config or {}) ) self._database = database self.timeout = timeout self.sanitize = sanitize self._enhanced_schema = enhanced_schema self.schema: str = "" self.structured_schema: Dict[str, Any] = {} # Verify connection try: self._driver.verify_connectivity() except neo4j.exceptions.ServiceUnavailable: raise ValueError( "Could not connect to Neo4j database. " "Please ensure that the url is correct" ) except neo4j.exceptions.AuthError: raise ValueError( "Could not connect to Neo4j database. " "Please ensure that the username and password are correct" ) # Set schema if refresh_schema: try: self.refresh_schema() except neo4j.exceptions.ClientError as e: if e.code == "Neo.ClientError.Procedure.ProcedureNotFound": raise ValueError( "Could not use APOC procedures. " "Please ensure the APOC plugin is installed in Neo4j and that " "'apoc.meta.data()' is allowed in Neo4j configuration " ) raise e @property def get_schema(self) -> str: """Returns the schema of the Graph""" return self.schema @property def get_structured_schema(self) -> Dict[str, Any]: """Returns the structured schema of the Graph""" return self.structured_schema def query( self, query: str, params: dict = {}, ) -> List[Dict[str, Any]]: """Query Neo4j database. Args: query (str): The Cypher query to execute. params (dict): The parameters to pass to the query. Returns: List[Dict[str, Any]]: The list of dictionaries containing the query results. 
""" from neo4j import Query from neo4j.exceptions import Neo4jError try: data, _, _ = self._driver.execute_query( Query(text=query, timeout=self.timeout), database_=self._database, parameters_=params, ) json_data = [r.data() for r in data] if self.sanitize: json_data = [value_sanitize(el) for el in json_data] return json_data except Neo4jError as e: if not ( ( ( # isCallInTransactionError e.code == "Neo.DatabaseError.Statement.ExecutionFailed" or e.code == "Neo.DatabaseError.Transaction.TransactionStartFailed" ) and "in an implicit transaction" in e.message # type: ignore[operator] ) or ( # isPeriodicCommitError e.code == "Neo.ClientError.Statement.SemanticError" and ( "in an open transaction is not possible" in e.message # type: ignore[operator] or "tried to execute in an explicit transaction" in e.message # type: ignore[operator] ) ) ): raise # fallback to allow implicit transactions with self._driver.session(database=self._database) as session: data = session.run(Query(text=query, timeout=self.timeout), params) # type: ignore[assignment] json_data = [r.data() for r in data] if self.sanitize: json_data = [value_sanitize(el) for el in json_data] return json_data def refresh_schema(self) -> None: """ Refreshes the Neo4j graph schema information. """ from neo4j.exceptions import ClientError, CypherTypeError node_properties = [ el["output"] for el in self.query( node_properties_query, params={"EXCLUDED_LABELS": EXCLUDED_LABELS + [BASE_ENTITY_LABEL]}, ) ] rel_properties = [ el["output"] for el in self.query( rel_properties_query, params={"EXCLUDED_LABELS": EXCLUDED_RELS} ) ] relationships = [ el["output"] for el in self.query( rel_query, params={"EXCLUDED_LABELS": EXCLUDED_LABELS + [BASE_ENTITY_LABEL]}, ) ] # Get constraints & indexes try: constraint = self.query("SHOW CONSTRAINTS") index = self.query( "CALL apoc.schema.nodes() YIELD label, properties, type, size, " "valuesSelectivity WHERE type = 'RANGE' RETURN *, " "size * valuesSelectivity as distinctValues" ) except ( ClientError ): # Read-only user might not have access to schema information constraint = [] index = [] self.structured_schema = { "node_props": {el["labels"]: el["properties"] for el in node_properties}, "rel_props": {el["type"]: el["properties"] for el in rel_properties}, "relationships": relationships, "metadata": {"constraint": constraint, "index": index}, } if self._enhanced_schema: schema_counts = self.query( "CALL apoc.meta.graphSample() YIELD nodes, relationships " "RETURN nodes, [rel in relationships | {name:apoc.any.property" "(rel, 'type'), count: apoc.any.property(rel, 'count')}]" " AS relationships" ) # Update node info for node in schema_counts[0]["nodes"]: # Skip bloom labels if node["name"] in EXCLUDED_LABELS: continue node_props = self.structured_schema["node_props"].get(node["name"]) if not node_props: # The node has no properties continue enhanced_cypher = self._enhanced_schema_cypher( node["name"], node_props, node["count"] < EXHAUSTIVE_SEARCH_LIMIT ) # Due to schema-flexible nature of neo4j errors can happen try: enhanced_info = self.query(enhanced_cypher)[0]["output"] for prop in node_props: if prop["property"] in enhanced_info: prop.update(enhanced_info[prop["property"]]) except CypherTypeError: continue # Update rel info for rel in schema_counts[0]["relationships"]: # Skip bloom labels if rel["name"] in EXCLUDED_RELS: continue rel_props = self.structured_schema["rel_props"].get(rel["name"]) if not rel_props: # The rel has no properties continue enhanced_cypher = self._enhanced_schema_cypher( 
rel["name"], rel_props, rel["count"] < EXHAUSTIVE_SEARCH_LIMIT, is_relationship=True, ) try: enhanced_info = self.query(enhanced_cypher)[0]["output"] for prop in rel_props: if prop["property"] in enhanced_info: prop.update(enhanced_info[prop["property"]]) # Due to schema-flexible nature of neo4j errors can happen except CypherTypeError: continue schema = _format_schema(self.structured_schema, self._enhanced_schema) self.schema = schema def add_graph_documents( self, graph_documents: List[GraphDocument], include_source: bool = False, baseEntityLabel: bool = False, ) -> None: """ This method constructs nodes and relationships in the graph based on the provided GraphDocument objects. Parameters: - graph_documents (List[GraphDocument]): A list of GraphDocument objects that contain the nodes and relationships to be added to the graph. Each GraphDocument should encapsulate the structure of part of the graph, including nodes, relationships, and the source document information. - include_source (bool, optional): If True, stores the source document and links it to nodes in the graph using the MENTIONS relationship. This is useful for tracing back the origin of data. Merges source documents based on the `id` property from the source document metadata if available; otherwise it calculates the MD5 hash of `page_content` for merging process. Defaults to False. - baseEntityLabel (bool, optional): If True, each newly created node gets a secondary __Entity__ label, which is indexed and improves import speed and performance. Defaults to False. """ if baseEntityLabel: # Check if constraint already exists constraint_exists = any( [ el["labelsOrTypes"] == [BASE_ENTITY_LABEL] and el["properties"] == ["id"] for el in self.structured_schema.get("metadata", {}).get( "constraint", [] ) ] ) if not constraint_exists: # Create constraint self.query( f"CREATE CONSTRAINT IF NOT EXISTS FOR (b:{BASE_ENTITY_LABEL}) " "REQUIRE b.id IS UNIQUE;" ) self.refresh_schema() # Refresh constraint information node_import_query = _get_node_import_query(baseEntityLabel, include_source) rel_import_query = _get_rel_import_query(baseEntityLabel) for document in graph_documents: if not document.source.metadata.get("id"): document.source.metadata["id"] = md5( document.source.page_content.encode("utf-8") ).hexdigest() # Remove backticks from node types for node in document.nodes: node.type = _remove_backticks(node.type) # Import nodes self.query( node_import_query, { "data": [el.__dict__ for el in document.nodes], "document": document.source.__dict__, }, ) # Import relationships self.query( rel_import_query, { "data": [ { "source": el.source.id, "source_label": _remove_backticks(el.source.type), "target": el.target.id, "target_label": _remove_backticks(el.target.type), "type": _remove_backticks( el.type.replace(" ", "_").upper() ), "properties": el.properties, } for el in document.relationships ] }, ) def _enhanced_schema_cypher( self, label_or_type: str, properties: List[Dict[str, Any]], exhaustive: bool, is_relationship: bool = False, ) -> str: if is_relationship: match_clause = f"MATCH ()-[n:`{label_or_type}`]->()" else: match_clause = f"MATCH (n:`{label_or_type}`)" with_clauses = [] return_clauses = [] output_dict = {} if exhaustive: for prop in properties: prop_name = prop["property"] prop_type = prop["type"] if prop_type == "STRING": with_clauses.append( ( f"collect(distinct substring(toString(n.`{prop_name}`)" f", 0, 50)) AS `{prop_name}_values`" ) ) return_clauses.append( ( f"values:`{prop_name}_values`[..{DISTINCT_VALUE_LIMIT}]," 
f" distinct_count: size(`{prop_name}_values`)" ) ) elif prop_type in [ "INTEGER", "FLOAT", "DATE", "DATE_TIME", "LOCAL_DATE_TIME", ]: with_clauses.append(f"min(n.`{prop_name}`) AS `{prop_name}_min`") with_clauses.append(f"max(n.`{prop_name}`) AS `{prop_name}_max`") with_clauses.append( f"count(distinct n.`{prop_name}`) AS `{prop_name}_distinct`" ) return_clauses.append( ( f"min: toString(`{prop_name}_min`), " f"max: toString(`{prop_name}_max`), " f"distinct_count: `{prop_name}_distinct`" ) ) elif prop_type == "LIST": with_clauses.append( ( f"min(size(n.`{prop_name}`)) AS `{prop_name}_size_min`, " f"max(size(n.`{prop_name}`)) AS `{prop_name}_size_max`" ) ) return_clauses.append( f"min_size: `{prop_name}_size_min`, " f"max_size: `{prop_name}_size_max`" ) elif prop_type in ["BOOLEAN", "POINT", "DURATION"]: continue output_dict[prop_name] = "{" + return_clauses.pop() + "}" else: # Just sample 5 random nodes match_clause += " WITH n LIMIT 5" for prop in properties: prop_name = prop["property"] prop_type = prop["type"] # Check if indexed property, we can still do exhaustive prop_index = [ el for el in self.structured_schema["metadata"]["index"] if el["label"] == label_or_type and el["properties"] == [prop_name] and el["type"] == "RANGE" ] if prop_type == "STRING": if ( prop_index and prop_index[0].get("size") > 0 and prop_index[0].get("distinctValues") <= DISTINCT_VALUE_LIMIT ): distinct_values = self.query( f"CALL apoc.schema.properties.distinct(" f"'{label_or_type}', '{prop_name}') YIELD value" )[0]["value"] return_clauses.append( ( f"values: {distinct_values}," f" distinct_count: {len(distinct_values)}" ) ) else: with_clauses.append( ( f"collect(distinct substring(toString(n.`{prop_name}`)" f", 0, 50)) AS `{prop_name}_values`" ) ) return_clauses.append(f"values: `{prop_name}_values`") elif prop_type in [ "INTEGER", "FLOAT", "DATE", "DATE_TIME", "LOCAL_DATE_TIME", ]: if not prop_index: with_clauses.append( f"collect(distinct toString(n.`{prop_name}`)) " f"AS `{prop_name}_values`" ) return_clauses.append(f"values: `{prop_name}_values`") else: with_clauses.append( f"min(n.`{prop_name}`) AS `{prop_name}_min`" ) with_clauses.append( f"max(n.`{prop_name}`) AS `{prop_name}_max`" ) with_clauses.append( f"count(distinct n.`{prop_name}`) AS `{prop_name}_distinct`" ) return_clauses.append( ( f"min: toString(`{prop_name}_min`), " f"max: toString(`{prop_name}_max`), " f"distinct_count: `{prop_name}_distinct`" ) ) elif prop_type == "LIST": with_clauses.append( ( f"min(size(n.`{prop_name}`)) AS `{prop_name}_size_min`, " f"max(size(n.`{prop_name}`)) AS `{prop_name}_size_max`" ) ) return_clauses.append( ( f"min_size: `{prop_name}_size_min`, " f"max_size: `{prop_name}_size_max`" ) ) elif prop_type in ["BOOLEAN", "POINT", "DURATION"]: continue output_dict[prop_name] = "{" + return_clauses.pop() + "}" with_clause = "WITH " + ",\n ".join(with_clauses) return_clause = ( "RETURN {" + ", ".join(f"`{k}`: {v}" for k, v in output_dict.items()) + "} AS output" ) # Combine all parts of the Cypher query cypher_query = "\n".join([match_clause, with_clause, return_clause]) return cypher_query
0
lc_public_repos/langchain/libs/community/langchain_community
lc_public_repos/langchain/libs/community/langchain_community/graphs/tigergraph_graph.py
from typing import Any, Dict, List, Optional

from langchain_community.graphs.graph_store import GraphStore


class TigerGraph(GraphStore):
    """TigerGraph wrapper for graph operations.

    *Security note*: Make sure that the database connection uses credentials
        that are narrowly-scoped to only include necessary permissions.
        Failure to do so may result in data corruption or loss, since the calling
        code may attempt commands that would result in deletion, mutation
        of data if appropriately prompted or reading sensitive data if such
        data is present in the database.
        The best way to guard against such negative outcomes is to (as appropriate)
        limit the permissions granted to the credentials used with this tool.

        See https://python.langchain.com/docs/security for more information.
    """

    def __init__(self, conn: Any) -> None:
        """Create a new TigerGraph graph wrapper instance."""
        self.set_connection(conn)
        self.set_schema()

    @property
    def conn(self) -> Any:
        return self._conn

    @property
    def schema(self) -> Dict[str, Any]:
        return self._schema

    def get_schema(self) -> str:  # type: ignore[override]
        if self._schema:
            return str(self._schema)
        else:
            self.set_schema()
            return str(self._schema)

    def set_connection(self, conn: Any) -> None:
        try:
            from pyTigerGraph import TigerGraphConnection
        except ImportError:
            raise ImportError(
                "Could not import pyTigerGraph python package. "
                "Please install it with `pip install pyTigerGraph`."
            )

        if not isinstance(conn, TigerGraphConnection):
            msg = "**conn** parameter must inherit from TigerGraphConnection"
            raise TypeError(msg)

        if conn.ai.nlqs_host is None:
            msg = """**conn** parameter does not have nlqs_host parameter defined.
                     Define hostname of NLQS service."""
            raise ConnectionError(msg)

        self._conn: TigerGraphConnection = conn
        self.set_schema()

    def set_schema(self, schema: Optional[Dict[str, Any]] = None) -> None:
        """
        Set the schema of the TigerGraph database.
        Auto-generates the schema if **schema** is None.
        """
        self._schema = self.generate_schema() if schema is None else schema

    def generate_schema(
        self,
    ) -> Dict[str, List[Dict[str, Any]]]:
        """
        Generates the schema of the TigerGraph database and returns it.
        """
        return self._conn.getSchema(force=True)

    def refresh_schema(self):  # type: ignore[no-untyped-def]
        self._schema = self.generate_schema()

    def query(self, query: str) -> Dict[str, Any]:  # type: ignore[override]
        """Query the TigerGraph database."""
        answer = self._conn.ai.query(query)
        return answer

    def register_query(
        self,
        function_header: str,
        description: str,
        docstring: str,
        param_types: dict = {},
    ) -> List[str]:
        """
        Wrapper function to register a custom GSQL query to the TigerGraph NLQS.
        """
        return self._conn.ai.registerCustomQuery(
            function_header, description, docstring, param_types
        )
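# Minimal usage sketch: host, graph name, and credentials are placeholders.
# The connection must have its NLQS host configured before being wrapped;
# configureInquiryAIHost() is the assumed pyTigerGraph call for that step and
# may differ between pyTigerGraph versions.
if __name__ == "__main__":
    from pyTigerGraph import TigerGraphConnection

    conn = TigerGraphConnection(
        host="https://my-instance.i.tgcloud.io",
        graphname="MyGraph",
        username="tigergraph",
        password="password",
    )
    conn.ai.configureInquiryAIHost("https://my-nlqs-host")  # assumed API
    graph = TigerGraph(conn)
    print(graph.get_schema())
    print(graph.query("How many vertices are in the graph?"))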
0
lc_public_repos/langchain/libs/community/langchain_community
lc_public_repos/langchain/libs/community/langchain_community/graphs/graph_store.py
from abc import abstractmethod
from typing import Any, Dict, List

from langchain_community.graphs.graph_document import GraphDocument


class GraphStore:
    """Abstract class for graph operations."""

    @property
    @abstractmethod
    def get_schema(self) -> str:
        """Return the schema of the Graph database"""
        pass

    @property
    @abstractmethod
    def get_structured_schema(self) -> Dict[str, Any]:
        """Return the structured schema of the Graph database"""
        pass

    @abstractmethod
    def query(self, query: str, params: dict = {}) -> List[Dict[str, Any]]:
        """Query the graph."""
        pass

    @abstractmethod
    def refresh_schema(self) -> None:
        """Refresh the graph schema information."""
        pass

    @abstractmethod
    def add_graph_documents(
        self, graph_documents: List[GraphDocument], include_source: bool = False
    ) -> None:
        """Take GraphDocument as input and use it to construct a graph."""
        pass
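# Minimal illustration of the contract above: a toy in-memory subclass, not a
# real LangChain backend. Concrete stores such as Neo4jGraph implement the same
# five members against an actual database.
class InMemoryGraphStore(GraphStore):
    """Toy GraphStore that accumulates GraphDocuments in a Python list."""

    def __init__(self) -> None:
        self._docs: List[GraphDocument] = []

    @property
    def get_schema(self) -> str:
        return f"in-memory store holding {len(self._docs)} graph documents"

    @property
    def get_structured_schema(self) -> Dict[str, Any]:
        return {"documents": len(self._docs)}

    def query(self, query: str, params: dict = {}) -> List[Dict[str, Any]]:
        # A real store would translate and execute `query`; the toy echoes it.
        return [{"query": query, "params": params}]

    def refresh_schema(self) -> None:
        pass  # nothing cached to rebuild in this toy store

    def add_graph_documents(
        self, graph_documents: List[GraphDocument], include_source: bool = False
    ) -> None:
        self._docs.extend(graph_documents)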
0
lc_public_repos/langchain/libs/community/langchain_community/agents
lc_public_repos/langchain/libs/community/langchain_community/agents/openai_assistant/base.py
from __future__ import annotations from typing import ( TYPE_CHECKING, Any, Callable, Dict, Optional, Sequence, Type, Union, ) from langchain.agents.openai_assistant.base import OpenAIAssistantRunnable, OutputType from langchain_core._api import beta from langchain_core.callbacks import CallbackManager from langchain_core.load import dumpd from langchain_core.runnables import RunnableConfig, ensure_config from langchain_core.tools import BaseTool from langchain_core.utils.function_calling import convert_to_openai_tool from pydantic import BaseModel, Field, model_validator from typing_extensions import Self if TYPE_CHECKING: import openai from openai._types import NotGiven from openai.types.beta.assistant import ToolResources as AssistantToolResources def _get_openai_client() -> openai.OpenAI: """Get the OpenAI client. Returns: openai.OpenAI: OpenAI client Raises: ImportError: If `openai` is not installed. AttributeError: If the installed `openai` version is not compatible. """ try: import openai return openai.OpenAI(default_headers={"OpenAI-Beta": "assistants=v2"}) except ImportError as e: raise ImportError( "Unable to import openai, please install with `pip install openai`." ) from e except AttributeError as e: raise AttributeError( "Please make sure you are using a v1.23-compatible version of openai. You " 'can install with `pip install "openai>=1.23"`.' ) from e def _get_openai_async_client() -> openai.AsyncOpenAI: """Get the async OpenAI client. Returns: openai.AsyncOpenAI: Async OpenAI client Raises: ImportError: If `openai` is not installed. AttributeError: If the installed `openai` version is not compatible. """ try: import openai return openai.AsyncOpenAI(default_headers={"OpenAI-Beta": "assistants=v2"}) except ImportError as e: raise ImportError( "Unable to import openai, please install with `pip install openai`." ) from e except AttributeError as e: raise AttributeError( "Please make sure you are using a v1.23-compatible version of openai. You " 'can install with `pip install "openai>=1.23"`.' ) from e def _convert_file_ids_into_attachments(file_ids: list) -> list: """Convert file_ids into attachments File search and Code interpreter will be turned on by default. Args: file_ids (list): List of file_ids that need to be converted into attachments. Returns: list: List of attachments converted from file_ids. """ attachments = [] for id in file_ids: attachments.append( { "file_id": id, "tools": [{"type": "file_search"}, {"type": "code_interpreter"}], } ) return attachments def _is_assistants_builtin_tool( tool: Union[Dict[str, Any], Type[BaseModel], Callable, BaseTool], ) -> bool: """Determine if tool corresponds to OpenAI Assistants built-in. Args: tool (Union[Dict[str, Any], Type[BaseModel], Callable, BaseTool]): Tool that needs to be determined. Returns: A boolean response of true or false indicating if the tool corresponds to OpenAI Assistants built-in. """ assistants_builtin_tools = ("code_interpreter", "retrieval", "file_search") return ( isinstance(tool, dict) and ("type" in tool) and (tool["type"] in assistants_builtin_tools) ) def _get_assistants_tool( tool: Union[Dict[str, Any], Type[BaseModel], Callable, BaseTool], ) -> Dict[str, Any]: """Convert a raw function/class to an OpenAI tool. Note that OpenAI assistants supports several built-in tools, such as "code_interpreter" and "retrieval." Args: tool (Union[Dict[str, Any], Type[BaseModel], Callable, BaseTool]): Tools or functions that need to be converted to OpenAI tools. 
Returns: Dict[str, Any]: A dictionary of tools that are converted into OpenAI tools. """ if _is_assistants_builtin_tool(tool): return tool # type: ignore else: return convert_to_openai_tool(tool) @beta() class OpenAIAssistantV2Runnable(OpenAIAssistantRunnable): """Run an OpenAI Assistant. Attributes: client (Any): OpenAI or AzureOpenAI client. async_client (Any): Async OpenAI or AzureOpenAI client. assistant_id (str): OpenAI assistant ID. check_every_ms (float): Frequency to check progress in milliseconds. as_agent (bool): Whether to use the assistant as a LangChain agent. Example using OpenAI tools: .. code-block:: python from langchain.agents.openai_assistant import OpenAIAssistantV2Runnable assistant = OpenAIAssistantV2Runnable.create_assistant( name="math assistant", instructions="You are a personal math tutor. Write and run code to answer math questions.", tools=[{"type": "code_interpreter"}], model="gpt-4-1106-preview" ) output = assistant.invoke({"content": "What's 10 - 4 raised to the 2.7"}) Example using custom tools and AgentExecutor: .. code-block:: python from langchain.agents.openai_assistant import OpenAIAssistantV2Runnable from langchain.agents import AgentExecutor from langchain.tools import E2BDataAnalysisTool tools = [E2BDataAnalysisTool(api_key="...")] agent = OpenAIAssistantV2Runnable.create_assistant( name="langchain assistant e2b tool", instructions="You are a personal math tutor. Write and run code to answer math questions.", tools=tools, model="gpt-4-1106-preview", as_agent=True ) agent_executor = AgentExecutor(agent=agent, tools=tools) agent_executor.invoke({"content": "Analyze the data..."}) Example using custom tools and custom execution: .. code-block:: python from langchain.agents.openai_assistant import OpenAIAssistantV2Runnable from langchain.agents import AgentExecutor from langchain_core.agents import AgentFinish from langchain.tools import E2BDataAnalysisTool tools = [E2BDataAnalysisTool(api_key="...")] agent = OpenAIAssistantV2Runnable.create_assistant( name="langchain assistant e2b tool", instructions="You are a personal math tutor. 
Write and run code to answer math questions.", tools=tools, model="gpt-4-1106-preview", as_agent=True ) def execute_agent(agent, tools, input): tool_map = {tool.name: tool for tool in tools} response = agent.invoke(input) while not isinstance(response, AgentFinish): tool_outputs = [] for action in response: tool_output = tool_map[action.tool].invoke(action.tool_input) tool_outputs.append({"output": tool_output, "tool_call_id": action.tool_call_id}) response = agent.invoke( { "tool_outputs": tool_outputs, "run_id": action.run_id, "thread_id": action.thread_id } ) return response response = execute_agent(agent, tools, {"content": "What's 10 - 4 raised to the 2.7"}) next_response = execute_agent(agent, tools, {"content": "now add 17.241", "thread_id": response.thread_id}) """ # noqa: E501 client: Any = Field(default_factory=_get_openai_client) """OpenAI or AzureOpenAI client.""" async_client: Any = None """OpenAI or AzureOpenAI async client.""" assistant_id: str """OpenAI assistant id.""" check_every_ms: float = 1_000.0 """Frequency with which to check run progress in milliseconds.""" as_agent: bool = False """Use as a LangChain agent, compatible with the AgentExecutor.""" @model_validator(mode="after") def validate_async_client(self) -> Self: """Validate that the async client is set, otherwise initialize it.""" if self.async_client is None: import openai api_key = self.client.api_key self.async_client = openai.AsyncOpenAI(api_key=api_key) return self @classmethod def create_assistant( cls, name: str, instructions: str, tools: Sequence[Union[BaseTool, dict]], model: str, *, model_kwargs: dict[str, float] = {}, client: Optional[Union[openai.OpenAI, openai.AzureOpenAI]] = None, tool_resources: Optional[Union[AssistantToolResources, dict, NotGiven]] = None, extra_body: Optional[object] = None, **kwargs: Any, ) -> OpenAIAssistantRunnable: """Create an OpenAI Assistant and instantiate the Runnable. Args: name (str): Assistant name. instructions (str): Assistant instructions. tools (Sequence[Union[BaseTool, dict]]): Assistant tools. Can be passed in OpenAI format or as BaseTools. tool_resources (Optional[Union[AssistantToolResources, dict, NotGiven]]): Assistant tool resources. Can be passed in OpenAI format. model (str): Assistant model to use. client (Optional[Union[openai.OpenAI, openai.AzureOpenAI]]): OpenAI or AzureOpenAI client. Will create default OpenAI client (Assistant v2) if not specified. model_kwargs: Additional model arguments. Only available for temperature and top_p parameters. extra_body: Additional body parameters to be passed to the assistant. Returns: OpenAIAssistantRunnable: The configured assistant runnable. """ client = client or _get_openai_client() if tool_resources is None: from openai._types import NOT_GIVEN tool_resources = NOT_GIVEN assistant = client.beta.assistants.create( name=name, instructions=instructions, tools=[_get_assistants_tool(tool) for tool in tools], # type: ignore tool_resources=tool_resources, # type: ignore[arg-type] model=model, extra_body=extra_body, **model_kwargs, ) return cls(assistant_id=assistant.id, client=client, **kwargs) def invoke( self, input: dict, config: Optional[RunnableConfig] = None, **kwargs: Any ) -> OutputType: """Invoke the assistant. Args: input (dict): Runnable input dict that can have: content: User message when starting a new run. thread_id: Existing thread to use. run_id: Existing run to use. Should only be supplied when providing the tool output for a required action after an initial invocation. 
file_ids: (deprecated) File ids to include in new run. Use 'attachments' instead attachments: Assistant files to include in new run. (v2 API). message_metadata: Metadata to associate with new message. thread_metadata: Metadata to associate with new thread. Only relevant when new thread being created. instructions: Additional run instructions. model: Override Assistant model for this run. tools: Override Assistant tools for this run. tool_resources: Override Assistant tool resources for this run (v2 API). run_metadata: Metadata to associate with new run. config (Optional[RunnableConfig]): Configuration for the run. Returns: OutputType: If self.as_agent, will return Union[List[OpenAIAssistantAction], OpenAIAssistantFinish]. Otherwise, will return OpenAI types Union[List[ThreadMessage], List[RequiredActionFunctionToolCall]]. Raises: BaseException: If an error occurs during the invocation. """ config = ensure_config(config) callback_manager = CallbackManager.configure( inheritable_callbacks=config.get("callbacks"), inheritable_tags=config.get("tags"), inheritable_metadata=config.get("metadata"), ) run_manager = callback_manager.on_chain_start( dumpd(self), input, name=config.get("run_name") or self.get_name() ) files = _convert_file_ids_into_attachments(kwargs.get("file_ids", [])) attachments = kwargs.get("attachments", []) + files try: # Being run within AgentExecutor and there are tool outputs to submit. if self.as_agent and input.get("intermediate_steps"): tool_outputs = self._parse_intermediate_steps( input["intermediate_steps"] ) run = self.client.beta.threads.runs.submit_tool_outputs(**tool_outputs) # Starting a new thread and a new run. elif "thread_id" not in input: thread = { "messages": [ { "role": "user", "content": input["content"], "attachments": attachments, "metadata": input.get("message_metadata"), } ], "metadata": input.get("thread_metadata"), } run = self._create_thread_and_run(input, thread) # Starting a new run in an existing thread. elif "run_id" not in input: _ = self.client.beta.threads.messages.create( input["thread_id"], content=input["content"], role="user", attachments=attachments, metadata=input.get("message_metadata"), ) run = self._create_run(input) # Submitting tool outputs to an existing run, outside the AgentExecutor # framework. else: run = self.client.beta.threads.runs.submit_tool_outputs(**input) run = self._wait_for_run(run.id, run.thread_id) except BaseException as e: run_manager.on_chain_error(e) raise e try: response = self._get_response(run) except BaseException as e: run_manager.on_chain_error(e, metadata=run.dict()) raise e else: run_manager.on_chain_end(response) return response @classmethod async def acreate_assistant( cls, name: str, instructions: str, tools: Sequence[Union[BaseTool, dict]], model: str, *, async_client: Optional[ Union[openai.AsyncOpenAI, openai.AsyncAzureOpenAI] ] = None, tool_resources: Optional[Union[AssistantToolResources, dict, NotGiven]] = None, **kwargs: Any, ) -> OpenAIAssistantRunnable: """Create an AsyncOpenAI Assistant and instantiate the Runnable. Args: name (str): Assistant name. instructions (str): Assistant instructions. tools (Sequence[Union[BaseTool, dict]]): Assistant tools. Can be passed in OpenAI format or as BaseTools. tool_resources (Optional[Union[AssistantToolResources, dict, NotGiven]]): Assistant tool resources. Can be passed in OpenAI format. model (str): Assistant model to use. async_client (Optional[Union[openai.OpenAI, openai.AzureOpenAI]]): OpenAI or AzureOpenAI async client. 
Will create default async_client if not specified. Returns: AsyncOpenAIAssistantRunnable: The configured assistant runnable. """ async_client = async_client or _get_openai_async_client() if tool_resources is None: from openai._types import NOT_GIVEN tool_resources = NOT_GIVEN openai_tools = [_get_assistants_tool(tool) for tool in tools] assistant = await async_client.beta.assistants.create( name=name, instructions=instructions, tools=openai_tools, # type: ignore tool_resources=tool_resources, # type: ignore[arg-type] model=model, ) return cls(assistant_id=assistant.id, async_client=async_client, **kwargs) async def ainvoke( self, input: dict, config: Optional[RunnableConfig] = None, **kwargs: Any ) -> OutputType: """Async invoke assistant. Args: input (dict): Runnable input dict that can have: content: User message when starting a new run. thread_id: Existing thread to use. run_id: Existing run to use. Should only be supplied when providing the tool output for a required action after an initial invocation. file_ids: (deprecated) File ids to include in new run. Use 'attachments' instead attachments: Assistant files to include in new run. (v2 API). message_metadata: Metadata to associate with new message. thread_metadata: Metadata to associate with new thread. Only relevant when new thread being created. instructions: Additional run instructions. model: Override Assistant model for this run. tools: Override Assistant tools for this run. tool_resources: Override Assistant tool resources for this run (v2 API). run_metadata: Metadata to associate with new run. config (Optional[RunnableConfig]): Configuration for the run. Returns: OutputType: If self.as_agent, will return Union[List[OpenAIAssistantAction], OpenAIAssistantFinish]. Otherwise, will return OpenAI types Union[List[ThreadMessage], List[RequiredActionFunctionToolCall]]. Raises: BaseException: If an error occurs during the invocation. """ config = config or {} callback_manager = CallbackManager.configure( inheritable_callbacks=config.get("callbacks"), inheritable_tags=config.get("tags"), inheritable_metadata=config.get("metadata"), ) run_manager = callback_manager.on_chain_start( dumpd(self), input, name=config.get("run_name") or self.get_name() ) files = _convert_file_ids_into_attachments(kwargs.get("file_ids", [])) attachments = kwargs.get("attachments", []) + files try: # Being run within AgentExecutor and there are tool outputs to submit. if self.as_agent and input.get("intermediate_steps"): tool_outputs = self._parse_intermediate_steps( input["intermediate_steps"] ) run = await self.async_client.beta.threads.runs.submit_tool_outputs( **tool_outputs ) # Starting a new thread and a new run. elif "thread_id" not in input: thread = { "messages": [ { "role": "user", "content": input["content"], "attachments": attachments, "metadata": input.get("message_metadata"), } ], "metadata": input.get("thread_metadata"), } run = await self._acreate_thread_and_run(input, thread) # Starting a new run in an existing thread. elif "run_id" not in input: _ = await self.async_client.beta.threads.messages.create( input["thread_id"], content=input["content"], role="user", attachments=attachments, metadata=input.get("message_metadata"), ) run = await self._acreate_run(input) # Submitting tool outputs to an existing run, outside the AgentExecutor # framework. 
else: run = await self.async_client.beta.threads.runs.submit_tool_outputs( **input ) run = await self._await_for_run(run.id, run.thread_id) except BaseException as e: run_manager.on_chain_error(e) raise e try: response = self._get_response(run) except BaseException as e: run_manager.on_chain_error(e, metadata=run.dict()) raise e else: run_manager.on_chain_end(response) return response def _create_run(self, input: dict) -> Any: """Create a new run within an existing thread. Args: input (dict): The input data for the new run. Returns: Any: The created run object. """ allowed_assistant_params = ( "instructions", "model", "tools", "tool_resources", "run_metadata", "truncation_strategy", "max_prompt_tokens", ) params = {k: v for k, v in input.items() if k in allowed_assistant_params} return self.client.beta.threads.runs.create( input["thread_id"], assistant_id=self.assistant_id, **params, ) def _create_thread_and_run(self, input: dict, thread: dict) -> Any: """Create a new thread and run. Args: input (dict): The input data for the run. thread (dict): The thread data to create. Returns: Any: The created thread and run. """ params = { k: v for k, v in input.items() if k in ("instructions", "model", "tools", "run_metadata") } if tool_resources := input.get("tool_resources"): thread["tool_resources"] = tool_resources run = self.client.beta.threads.create_and_run( assistant_id=self.assistant_id, thread=thread, **params, ) return run async def _acreate_run(self, input: dict) -> Any: """Asynchronously create a new run within an existing thread. Args: input (dict): The input data for the new run. Returns: Any: The created run object. """ params = { k: v for k, v in input.items() if k in ("instructions", "model", "tools", "tool_resources", "run_metadata") } return await self.async_client.beta.threads.runs.create( input["thread_id"], assistant_id=self.assistant_id, **params, ) async def _acreate_thread_and_run(self, input: dict, thread: dict) -> Any: """Asynchronously create a new thread and run simultaneously. Args: input (dict): The input data for the run. thread (dict): The thread data to create. Returns: Any: The created thread and run. """ params = { k: v for k, v in input.items() if k in ("instructions", "model", "tools", "run_metadata") } if tool_resources := input.get("tool_resources"): thread["tool_resources"] = tool_resources run = await self.async_client.beta.threads.create_and_run( assistant_id=self.assistant_id, thread=thread, **params, ) return run
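# --- Editor's sketch (illustrative, not part of the original module) --------
# The run-polling loop that invoke/ainvoke rely on (via the inherited
# _wait_for_run) retrieves the run until it leaves the "queued"/"in_progress"
# states, sleeping check_every_ms between polls. The helper name below is
# hypothetical and shown only to make the pattern explicit.
def _poll_run_until_done(
    client: Any, run_id: str, thread_id: str, check_every_ms: float = 1_000.0
) -> Any:
    import time

    while True:
        run = client.beta.threads.runs.retrieve(run_id, thread_id=thread_id)
        if run.status not in ("in_progress", "queued"):
            return run
        time.sleep(check_every_ms / 1000)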
0
lc_public_repos/langchain/libs/community/langchain_community/agents
lc_public_repos/langchain/libs/community/langchain_community/agents/openai_assistant/__init__.py
from langchain_community.agents.openai_assistant.base import OpenAIAssistantV2Runnable __all__ = ["OpenAIAssistantV2Runnable"]
0
lc_public_repos/langchain/libs/community/langchain_community
lc_public_repos/langchain/libs/community/langchain_community/vectorstores/semadb.py
from typing import Any, Iterable, List, Optional, Tuple from uuid import uuid4 import numpy as np import requests from langchain_core.documents import Document from langchain_core.embeddings import Embeddings from langchain_core.utils import get_from_env from langchain_core.vectorstores import VectorStore from langchain_community.vectorstores.utils import DistanceStrategy class SemaDB(VectorStore): """`SemaDB` vector store. This vector store is a wrapper around the SemaDB database. Example: .. code-block:: python from langchain_community.vectorstores import SemaDB db = SemaDB('mycollection', 768, embeddings, DistanceStrategy.COSINE) """ HOST: str = "semadb.p.rapidapi.com" BASE_URL = "https://" + HOST def __init__( self, collection_name: str, vector_size: int, embedding: Embeddings, distance_strategy: DistanceStrategy = DistanceStrategy.EUCLIDEAN_DISTANCE, api_key: str = "", ): """Initialize the SemaDB vector store.""" self.collection_name = collection_name self.vector_size = vector_size self.api_key = api_key or get_from_env("api_key", "SEMADB_API_KEY") self._embedding = embedding self.distance_strategy = distance_strategy @property def headers(self) -> dict: """Return the common headers.""" return { "content-type": "application/json", "X-RapidAPI-Key": self.api_key, "X-RapidAPI-Host": SemaDB.HOST, } def _get_internal_distance_strategy(self) -> str: """Return the internal distance strategy.""" if self.distance_strategy == DistanceStrategy.EUCLIDEAN_DISTANCE: return "euclidean" elif self.distance_strategy == DistanceStrategy.MAX_INNER_PRODUCT: raise ValueError("Max inner product is not supported by SemaDB") elif self.distance_strategy == DistanceStrategy.DOT_PRODUCT: return "dot" elif self.distance_strategy == DistanceStrategy.JACCARD: raise ValueError("Jaccard distance is not supported by SemaDB") elif self.distance_strategy == DistanceStrategy.COSINE: return "cosine" else: raise ValueError(f"Unknown distance strategy {self.distance_strategy}") def create_collection(self) -> bool: """Creates the corresponding collection in SemaDB.""" payload = { "id": self.collection_name, "vectorSize": self.vector_size, "distanceMetric": self._get_internal_distance_strategy(), } response = requests.post( SemaDB.BASE_URL + "/collections", json=payload, headers=self.headers, ) return response.status_code == 200 def delete_collection(self) -> bool: """Deletes the corresponding collection in SemaDB.""" response = requests.delete( SemaDB.BASE_URL + f"/collections/{self.collection_name}", headers=self.headers, ) return response.status_code == 200 def add_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, batch_size: int = 1000, **kwargs: Any, ) -> List[str]: """Add texts to the vector store.""" if not isinstance(texts, list): texts = list(texts) embeddings = self._embedding.embed_documents(texts) # Check dimensions if len(embeddings[0]) != self.vector_size: raise ValueError( f"Embedding size mismatch {len(embeddings[0])} != {self.vector_size}" ) # Normalise if needed if self.distance_strategy == DistanceStrategy.COSINE: embed_matrix = np.array(embeddings) embed_matrix = embed_matrix / np.linalg.norm( embed_matrix, axis=1, keepdims=True ) embeddings = embed_matrix.tolist() # Create points ids: List[str] = [] points = [] if metadatas is not None: for text, embedding, metadata in zip(texts, embeddings, metadatas): new_id = str(uuid4()) ids.append(new_id) points.append( { "id": new_id, "vector": embedding, "metadata": {**metadata, **{"text": text}}, } ) else: for text, embedding in
zip(texts, embeddings): new_id = str(uuid4()) ids.append(new_id) points.append( { "id": new_id, "vector": embedding, "metadata": {"text": text}, } ) # Insert points in batches for i in range(0, len(points), batch_size): batch = points[i : i + batch_size] response = requests.post( SemaDB.BASE_URL + f"/collections/{self.collection_name}/points", json={"points": batch}, headers=self.headers, ) if response.status_code != 200: raise ValueError(f"Error adding points: {response.text}") failed_ranges = response.json()["failedRanges"] if len(failed_ranges) > 0: raise ValueError(f"Error adding points: {failed_ranges}") # Return ids return ids @property def embeddings(self) -> Embeddings: """Return the embeddings.""" return self._embedding def delete(self, ids: Optional[List[str]] = None, **kwargs: Any) -> Optional[bool]: """Delete by vector ID or other criteria. Args: ids: List of ids to delete. **kwargs: Other keyword arguments that subclasses might use. Returns: Optional[bool]: True if deletion is successful, False otherwise, None if not implemented. """ payload = { "ids": ids, } response = requests.delete( SemaDB.BASE_URL + f"/collections/{self.collection_name}/points", json=payload, headers=self.headers, ) return response.status_code == 200 and len(response.json()["failedPoints"]) == 0 def _search_points(self, embedding: List[float], k: int = 4) -> List[dict]: """Search points.""" # Normalise if needed if self.distance_strategy == DistanceStrategy.COSINE: vec = np.array(embedding) vec = vec / np.linalg.norm(vec) embedding = vec.tolist() # Perform search request payload = { "vector": embedding, "limit": k, } response = requests.post( SemaDB.BASE_URL + f"/collections/{self.collection_name}/points/search", json=payload, headers=self.headers, ) if response.status_code != 200: raise ValueError(f"Error searching: {response.text}") return response.json()["points"] def similarity_search( self, query: str, k: int = 4, **kwargs: Any ) -> List[Document]: """Return docs most similar to query.""" query_embedding = self._embedding.embed_query(query) return self.similarity_search_by_vector(query_embedding, k=k) def similarity_search_with_score( self, query: str, k: int = 4, **kwargs: Any ) -> List[Tuple[Document, float]]: """Run similarity search with distance.""" query_embedding = self._embedding.embed_query(query) points = self._search_points(query_embedding, k=k) return [ ( Document(page_content=p["metadata"]["text"], metadata=p["metadata"]), p["distance"], ) for p in points ] def similarity_search_by_vector( self, embedding: List[float], k: int = 4, **kwargs: Any ) -> List[Document]: """Return docs most similar to embedding vector. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. Returns: List of Documents most similar to the query vector.
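Example:
    .. code-block:: python

        # Editor's illustrative sketch: `db` is an existing SemaDB
        # instance and `embeddings` the Embeddings object it was built
        # with (both assumed, not defined here).
        query_vector = embeddings.embed_query("hello world")
        docs = db.similarity_search_by_vector(query_vector, k=4)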
""" points = self._search_points(embedding, k=k) return [ Document(page_content=p["metadata"]["text"], metadata=p["metadata"]) for p in points ] @classmethod def from_texts( cls, texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, collection_name: str = "", vector_size: int = 0, api_key: str = "", distance_strategy: DistanceStrategy = DistanceStrategy.EUCLIDEAN_DISTANCE, **kwargs: Any, ) -> "SemaDB": """Return VectorStore initialized from texts and embeddings.""" if not collection_name: raise ValueError("Collection name must be provided") if not vector_size: raise ValueError("Vector size must be provided") if not api_key: raise ValueError("API key must be provided") semadb = cls( collection_name, vector_size, embedding, distance_strategy=distance_strategy, api_key=api_key, ) if not semadb.create_collection(): raise ValueError("Error creating collection") semadb.add_texts(texts, metadatas=metadatas) return semadb
0
lc_public_repos/langchain/libs/community/langchain_community
lc_public_repos/langchain/libs/community/langchain_community/vectorstores/awadb.py
from __future__ import annotations import logging import uuid from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Set, Tuple, Type import numpy as np from langchain_core.documents import Document from langchain_core.embeddings import Embeddings from langchain_core.vectorstores import VectorStore from langchain_community.vectorstores.utils import maximal_marginal_relevance if TYPE_CHECKING: import awadb logger = logging.getLogger() DEFAULT_TOPN = 4 class AwaDB(VectorStore): """`AwaDB` vector store.""" _DEFAULT_TABLE_NAME: str = "langchain_awadb" def __init__( self, table_name: str = _DEFAULT_TABLE_NAME, embedding: Optional[Embeddings] = None, log_and_data_dir: Optional[str] = None, client: Optional[awadb.Client] = None, **kwargs: Any, ) -> None: """Initialize with AwaDB client. If table_name is not specified, a random table name of `_DEFAULT_TABLE_NAME + last segment of uuid` would be created automatically. Args: table_name: Name of the table created, default _DEFAULT_TABLE_NAME. embedding: Optional Embeddings initially set. log_and_data_dir: Optional the root directory of log and data. client: Optional AwaDB client. kwargs: Any possible extend parameters in the future. Returns: None. """ try: import awadb except ImportError: raise ImportError( "Could not import awadb python package. " "Please install it with `pip install awadb`." ) if client is not None: self.awadb_client = client else: if log_and_data_dir is not None: self.awadb_client = awadb.Client(log_and_data_dir) else: self.awadb_client = awadb.Client() if table_name == self._DEFAULT_TABLE_NAME: table_name += "_" table_name += str(uuid.uuid4()).split("-")[-1] self.awadb_client.Create(table_name) self.table2embeddings: dict[str, Embeddings] = {} if embedding is not None: self.table2embeddings[table_name] = embedding self.using_table_name = table_name @property def embeddings(self) -> Optional[Embeddings]: if self.using_table_name in self.table2embeddings: return self.table2embeddings[self.using_table_name] return None def add_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, is_duplicate_texts: Optional[bool] = None, **kwargs: Any, ) -> List[str]: """Run more texts through the embeddings and add to the vectorstore. Args: texts: Iterable of strings to add to the vectorstore. metadatas: Optional list of metadatas associated with the texts. is_duplicate_texts: Optional whether to duplicate texts. Defaults to True. kwargs: any possible extend parameters in the future. Returns: List of ids from adding the texts into the vectorstore. """ if self.awadb_client is None: raise ValueError("AwaDB client is None!!!") embeddings = None if self.using_table_name in self.table2embeddings: embeddings = self.table2embeddings[self.using_table_name].embed_documents( list(texts) ) return self.awadb_client.AddTexts( "embedding_text", "text_embedding", texts, embeddings, metadatas, is_duplicate_texts, ) def load_local( self, table_name: str, **kwargs: Any, ) -> bool: """Load the local specified table. Args: table_name: Table name kwargs: Any possible extend parameters in the future. Returns: Success or failure of loading the local specified table """ if self.awadb_client is None: raise ValueError("AwaDB client is None!!!") return self.awadb_client.Load(table_name) def similarity_search( self, query: str, k: int = DEFAULT_TOPN, text_in_page_content: Optional[str] = None, meta_filter: Optional[dict] = None, **kwargs: Any, ) -> List[Document]: """Return docs most similar to query. Args: query: Text query. 
k: The maximum number of documents to return. text_in_page_content: Filter by the text in page_content of Document. meta_filter (Optional[dict]): Filter by metadata. Defaults to None. E.g. `{"color" : "red", "price": 4.20}`. Optional. E.g. `{"max_price" : 15.66, "min_price": 4.20}` `price` is the metadata field, means range filter(4.20<'price'<15.66). E.g. `{"maxe_price" : 15.66, "mine_price": 4.20}` `price` is the metadata field, means range filter(4.20<='price'<=15.66). kwargs: Any possible extend parameters in the future. Returns: Returns the k most similar documents to the specified text query. """ if self.awadb_client is None: raise ValueError("AwaDB client is None!!!") embedding = None if self.using_table_name in self.table2embeddings: embedding = self.table2embeddings[self.using_table_name].embed_query(query) else: from awadb import AwaEmbedding embedding = AwaEmbedding().Embedding(query) not_include_fields: Set[str] = {"text_embedding", "_id", "score"} return self.similarity_search_by_vector( embedding, k, text_in_page_content=text_in_page_content, meta_filter=meta_filter, not_include_fields_in_metadata=not_include_fields, ) def similarity_search_with_score( self, query: str, k: int = DEFAULT_TOPN, text_in_page_content: Optional[str] = None, meta_filter: Optional[dict] = None, **kwargs: Any, ) -> List[Tuple[Document, float]]: """The most k similar documents and scores of the specified query. Args: query: Text query. k: The k most similar documents to the text query. text_in_page_content: Filter by the text in page_content of Document. meta_filter: Filter by metadata. Defaults to None. kwargs: Any possible extend parameters in the future. Returns: The k most similar documents to the specified text query. 0 is dissimilar, 1 is the most similar. """ if self.awadb_client is None: raise ValueError("AwaDB client is None!!!") embedding = None if self.using_table_name in self.table2embeddings: embedding = self.table2embeddings[self.using_table_name].embed_query(query) else: from awadb import AwaEmbedding embedding = AwaEmbedding().Embedding(query) results: List[Tuple[Document, float]] = [] not_include_fields: Set[str] = {"text_embedding", "_id"} retrieval_docs = self.similarity_search_by_vector( embedding, k, text_in_page_content=text_in_page_content, meta_filter=meta_filter, not_include_fields_in_metadata=not_include_fields, ) for doc in retrieval_docs: score = doc.metadata["score"] del doc.metadata["score"] doc_tuple = (doc, score) results.append(doc_tuple) return results def _similarity_search_with_relevance_scores( self, query: str, k: int = 4, **kwargs: Any, ) -> List[Tuple[Document, float]]: return self.similarity_search_with_score(query, k, **kwargs) def similarity_search_by_vector( self, embedding: Optional[List[float]] = None, k: int = DEFAULT_TOPN, text_in_page_content: Optional[str] = None, meta_filter: Optional[dict] = None, not_include_fields_in_metadata: Optional[Set[str]] = None, **kwargs: Any, ) -> List[Document]: """Return docs most similar to embedding vector. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. text_in_page_content: Filter by the text in page_content of Document. meta_filter: Filter by metadata. Defaults to None. not_incude_fields_in_metadata: Not include meta fields of each document. Returns: List of Documents which are the most similar to the query vector. 
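Example:
    .. code-block:: python

        # Editor's illustrative sketch: `db` is an existing AwaDB
        # instance and `query_embedding` a precomputed embedding
        # (both assumed, not defined here).
        docs = db.similarity_search_by_vector(query_embedding, k=4)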
""" if self.awadb_client is None: raise ValueError("AwaDB client is None!!!") results: List[Document] = [] if embedding is None: return results show_results = self.awadb_client.Search( embedding, k, text_in_page_content=text_in_page_content, meta_filter=meta_filter, not_include_fields=not_include_fields_in_metadata, ) if show_results.__len__() == 0: return results for item_detail in show_results[0]["ResultItems"]: content = "" meta_data = {} for item_key in item_detail: if item_key == "embedding_text": content = item_detail[item_key] continue elif not_include_fields_in_metadata is not None: if item_key in not_include_fields_in_metadata: continue meta_data[item_key] = item_detail[item_key] results.append(Document(page_content=content, metadata=meta_data)) return results def max_marginal_relevance_search( self, query: str, k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, text_in_page_content: Optional[str] = None, meta_filter: Optional[dict] = None, **kwargs: Any, ) -> List[Document]: """Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. fetch_k: Number of Documents to fetch to pass to MMR algorithm. lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. text_in_page_content: Filter by the text in page_content of Document. meta_filter (Optional[dict]): Filter by metadata. Defaults to None. Returns: List of Documents selected by maximal marginal relevance. """ if self.awadb_client is None: raise ValueError("AwaDB client is None!!!") embedding: List[float] = [] if self.using_table_name in self.table2embeddings: embedding = self.table2embeddings[self.using_table_name].embed_query(query) else: from awadb import AwaEmbedding embedding = AwaEmbedding().Embedding(query) if embedding.__len__() == 0: return [] results = self.max_marginal_relevance_search_by_vector( embedding, k, fetch_k, lambda_mult=lambda_mult, text_in_page_content=text_in_page_content, meta_filter=meta_filter, ) return results def max_marginal_relevance_search_by_vector( self, embedding: List[float], k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, text_in_page_content: Optional[str] = None, meta_filter: Optional[dict] = None, **kwargs: Any, ) -> List[Document]: """Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. fetch_k: Number of Documents to fetch to pass to MMR algorithm. lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. text_in_page_content: Filter by the text in page_content of Document. meta_filter (Optional[dict]): Filter by metadata. Defaults to None. Returns: List of Documents selected by maximal marginal relevance. 
""" if self.awadb_client is None: raise ValueError("AwaDB client is None!!!") results: List[Document] = [] if embedding is None: return results not_include_fields: set = {"_id", "score"} retrieved_docs = self.similarity_search_by_vector( embedding, fetch_k, text_in_page_content=text_in_page_content, meta_filter=meta_filter, not_include_fields_in_metadata=not_include_fields, ) top_embeddings = [] for doc in retrieved_docs: top_embeddings.append(doc.metadata["text_embedding"]) selected_docs = maximal_marginal_relevance( np.array(embedding, dtype=np.float32), embedding_list=top_embeddings ) for s_id in selected_docs: if "text_embedding" in retrieved_docs[s_id].metadata: del retrieved_docs[s_id].metadata["text_embedding"] results.append(retrieved_docs[s_id]) return results def get( self, ids: Optional[List[str]] = None, text_in_page_content: Optional[str] = None, meta_filter: Optional[dict] = None, not_include_fields: Optional[Set[str]] = None, limit: Optional[int] = None, **kwargs: Any, ) -> Dict[str, Document]: """Return docs according ids. Args: ids: The ids of the embedding vectors. text_in_page_content: Filter by the text in page_content of Document. meta_filter: Filter by any metadata of the document. not_include_fields: Not pack the specified fields of each document. limit: The number of documents to return. Defaults to 5. Optional. Returns: Documents which satisfy the input conditions. """ if self.awadb_client is None: raise ValueError("AwaDB client is None!!!") docs_detail = self.awadb_client.Get( ids=ids, text_in_page_content=text_in_page_content, meta_filter=meta_filter, not_include_fields=not_include_fields, limit=limit, ) results: Dict[str, Document] = {} for doc_detail in docs_detail: content = "" meta_info = {} for field in doc_detail: if field == "embedding_text": content = doc_detail[field] continue elif field == "text_embedding" or field == "_id": continue meta_info[field] = doc_detail[field] doc = Document(page_content=content, metadata=meta_info) results[doc_detail["_id"]] = doc return results def delete( self, ids: Optional[List[str]] = None, **kwargs: Any, ) -> Optional[bool]: """Delete the documents which have the specified ids. Args: ids: The ids of the embedding vectors. **kwargs: Other keyword arguments that subclasses might use. Returns: Optional[bool]: True if deletion is successful. False otherwise, None if not implemented. """ if self.awadb_client is None: raise ValueError("AwaDB client is None!!!") ret: Optional[bool] = None if ids is None or ids.__len__() == 0: return ret ret = self.awadb_client.Delete(ids) return ret def update( self, ids: List[str], texts: Iterable[str], metadatas: Optional[List[dict]] = None, **kwargs: Any, ) -> List[str]: """Update the documents which have the specified ids. Args: ids: The id list of the updating embedding vector. texts: The texts of the updating documents. metadatas: The metadatas of the updating documents. Returns: the ids of the updated documents. """ if self.awadb_client is None: raise ValueError("AwaDB client is None!!!") return self.awadb_client.UpdateTexts( ids=ids, text_field_name="embedding_text", texts=texts, metadatas=metadatas ) def create_table( self, table_name: str, **kwargs: Any, ) -> bool: """Create a new table.""" if self.awadb_client is None: return False ret = self.awadb_client.Create(table_name) if ret: self.using_table_name = table_name return ret def use( self, table_name: str, **kwargs: Any, ) -> bool: """Use the specified table. 
If you don't know the table names, invoke list_tables to see them.""" if self.awadb_client is None: return False ret = self.awadb_client.Use(table_name) if ret: self.using_table_name = table_name return ret def list_tables( self, **kwargs: Any, ) -> List[str]: """List all the tables created by the client.""" if self.awadb_client is None: return [] return self.awadb_client.ListAllTables() def get_current_table( self, **kwargs: Any, ) -> str: """Get the current table.""" return self.using_table_name @classmethod def from_texts( cls: Type[AwaDB], texts: List[str], embedding: Optional[Embeddings] = None, metadatas: Optional[List[dict]] = None, table_name: str = _DEFAULT_TABLE_NAME, log_and_data_dir: Optional[str] = None, client: Optional[awadb.Client] = None, **kwargs: Any, ) -> AwaDB: """Create an AwaDB vectorstore from raw texts. Args: texts (List[str]): List of texts to add to the table. embedding (Optional[Embeddings]): Embedding function. Defaults to None. metadatas (Optional[List[dict]]): List of metadatas. Defaults to None. table_name (str): Name of the table to create. log_and_data_dir (Optional[str]): Directory of logging and persistence. client (Optional[awadb.Client]): AwaDB client. Returns: AwaDB: AwaDB vectorstore. """ awadb_client = cls( table_name=table_name, embedding=embedding, log_and_data_dir=log_and_data_dir, client=client, ) awadb_client.add_texts(texts=texts, metadatas=metadatas) return awadb_client @classmethod def from_documents( cls: Type[AwaDB], documents: List[Document], embedding: Optional[Embeddings] = None, table_name: str = _DEFAULT_TABLE_NAME, log_and_data_dir: Optional[str] = None, client: Optional[awadb.Client] = None, **kwargs: Any, ) -> AwaDB: """Create an AwaDB vectorstore from a list of documents. If a log_and_data_dir is specified, the table will be persisted there. Args: documents (List[Document]): List of documents to add to the vectorstore. embedding (Optional[Embeddings]): Embedding function. Defaults to None. table_name (str): Name of the table to create. log_and_data_dir (Optional[str]): Directory to persist the table. client (Optional[awadb.Client]): AwaDB client. kwargs: Any possible extended parameters in the future. Returns: AwaDB: AwaDB vectorstore. """ texts = [doc.page_content for doc in documents] metadatas = [doc.metadata for doc in documents] return cls.from_texts( texts=texts, embedding=embedding, metadatas=metadatas, table_name=table_name, log_and_data_dir=log_and_data_dir, client=client, )
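# --- Editor's usage sketch (illustrative addition, not part of the original
# module). AwaDB can embed the texts itself when no Embeddings object is
# supplied; the texts and query below are placeholders.
def _awadb_usage_example() -> List[Document]:
    db = AwaDB.from_texts(
        ["an apple", "a banana"],
        metadatas=[{"color": "red"}, {"color": "yellow"}],
    )
    return db.similarity_search("fruit", k=2)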
0
lc_public_repos/langchain/libs/community/langchain_community
lc_public_repos/langchain/libs/community/langchain_community/vectorstores/nucliadb.py
import os from typing import Any, Dict, Iterable, List, Optional, Type from langchain_core.documents import Document from langchain_core.embeddings import Embeddings from langchain_core.vectorstores import VST, VectorStore FIELD_TYPES = { "f": "files", "t": "texts", "l": "links", } class NucliaDB(VectorStore): """NucliaDB vector store.""" _config: Dict[str, Any] = {} def __init__( self, knowledge_box: str, local: bool, api_key: Optional[str] = None, backend: Optional[str] = None, ) -> None: """Initialize the NucliaDB client. Args: knowledge_box: the Knowledge Box id. local: Whether to use a local NucliaDB instance or Nuclia Cloud api_key: A contributor API key for the kb (needed when local is False) backend: The backend url to use when local is True, defaults to http://localhost:8080 """ try: from nuclia.sdk import NucliaAuth except ImportError: raise ImportError( "nuclia python package not found. " "Please install it with `pip install nuclia`." ) self._config["LOCAL"] = local zone = os.environ.get("NUCLIA_ZONE", "europe-1") self._kb = knowledge_box if local: if not backend: backend = "http://localhost:8080" self._config["BACKEND"] = f"{backend}/api/v1" self._config["TOKEN"] = None NucliaAuth().nucliadb(url=backend) NucliaAuth().kb(url=self.kb_url, interactive=False) else: self._config["BACKEND"] = f"https://{zone}.nuclia.cloud/api/v1" self._config["TOKEN"] = api_key NucliaAuth().kb( url=self.kb_url, token=self._config["TOKEN"], interactive=False ) @property def is_local(self) -> str: return self._config["LOCAL"] @property def kb_url(self) -> str: return f"{self._config['BACKEND']}/kb/{self._kb}" def add_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, **kwargs: Any, ) -> List[str]: """Upload texts to NucliaDB""" ids = [] from nuclia.sdk import NucliaResource factory = NucliaResource() for i, text in enumerate(texts): extra: Dict[str, Any] = {"metadata": ""} if metadatas: extra = {"metadata": metadatas[i]} id = factory.create( texts={"text": {"body": text}}, extra=extra, url=self.kb_url, api_key=self._config["TOKEN"], ) ids.append(id) return ids def delete(self, ids: Optional[List[str]] = None, **kwargs: Any) -> Optional[bool]: if not ids: return None from nuclia.sdk import NucliaResource factory = NucliaResource() results: List[bool] = [] for id in ids: try: factory.delete(rid=id, url=self.kb_url, api_key=self._config["TOKEN"]) results.append(True) except ValueError: results.append(False) return all(results) def similarity_search( self, query: str, k: int = 4, **kwargs: Any ) -> List[Document]: from nuclia.sdk import NucliaSearch from nucliadb_models.search import FindRequest, ResourceProperties request = FindRequest( query=query, page_size=k, show=[ResourceProperties.VALUES, ResourceProperties.EXTRA], ) search = NucliaSearch() results = search.find( query=request, url=self.kb_url, api_key=self._config["TOKEN"] ) paragraphs = [] for resource in results.resources.values(): for field in resource.fields.values(): for paragraph_id, paragraph in field.paragraphs.items(): info = paragraph_id.split("/") field_type = FIELD_TYPES.get(info[1], None) field_id = info[2] if not field_type: continue value = getattr(resource.data, field_type, {}).get(field_id, None) paragraphs.append( { "text": paragraph.text, "metadata": { "extra": getattr( getattr(resource, "extra", {}), "metadata", None ), "value": value, }, "order": paragraph.order, } ) sorted_paragraphs = sorted(paragraphs, key=lambda x: x["order"]) return [ Document(page_content=paragraph["text"], 
metadata=paragraph["metadata"]) for paragraph in sorted_paragraphs ] @classmethod def from_texts( cls: Type[VST], texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, **kwargs: Any, ) -> VST: """Return VectorStore initialized from texts and embeddings.""" raise NotImplementedError
0
lc_public_repos/langchain/libs/community/langchain_community
lc_public_repos/langchain/libs/community/langchain_community/vectorstores/zilliz.py
from __future__ import annotations import logging from typing import Any, Dict, List, Optional from langchain_core.embeddings import Embeddings from langchain_community.vectorstores.milvus import Milvus logger = logging.getLogger(__name__) class Zilliz(Milvus): """`Zilliz` vector store. You need to have `pymilvus` installed and a running Zilliz database. See the following documentation for how to run a Zilliz instance: https://docs.zilliz.com/docs/create-cluster IF USING L2/IP metric IT IS HIGHLY SUGGESTED TO NORMALIZE YOUR DATA. Args: embedding_function (Embeddings): Function used to embed the text. collection_name (str): Which Zilliz collection to use. Defaults to "LangChainCollection". connection_args (Optional[dict[str, any]]): The connection args used for this class come in the form of a dict. consistency_level (str): The consistency level to use for a collection. Defaults to "Session". index_params (Optional[dict]): Which index params to use. Defaults to HNSW/AUTOINDEX depending on service. search_params (Optional[dict]): Which search params to use. Defaults to the default of the index. drop_old (Optional[bool]): Whether to drop the current collection. Defaults to False. auto_id (bool): Whether to enable auto id for primary key. Defaults to False. If False, you need to provide text ids (string less than 65535 bytes). If True, Milvus will generate unique integers as primary keys. The connection args used for this class come in the form of a dict; here are a few of the options: address (str): The actual address of Zilliz instance. Example address: "localhost:19530" uri (str): The uri of Zilliz instance. Example uri: "https://in03-ba4234asae.api.gcp-us-west1.zillizcloud.com", host (str): The host of Zilliz instance. Default at "localhost", PyMilvus will fill in the default host if only port is provided. port (str/int): The port of Zilliz instance. Default at 19530, PyMilvus will fill in the default port if only host is provided. user (str): Use which user to connect to Zilliz instance. If user and password are provided, we will add related header in every RPC call. password (str): Required when user is provided. The password corresponding to the user. token (str): API key, for serverless clusters which can be used as replacements for user and password. secure (bool): Default is false. If set to true, tls will be enabled. client_key_path (str): If use tls two-way authentication, need to write the client.key path. client_pem_path (str): If use tls two-way authentication, need to write the client.pem path. ca_pem_path (str): If use tls two-way authentication, need to write the ca.pem path. server_pem_path (str): If use tls one-way authentication, need to write the server.pem path. server_name (str): If use tls, need to write the common name. Example: .. code-block:: python from langchain_community.vectorstores import Zilliz from langchain_community.embeddings import OpenAIEmbeddings embedding = OpenAIEmbeddings() # Connect to a Zilliz instance zilliz_store = Zilliz( embedding_function=embedding, collection_name="LangChainCollection", connection_args={ "uri": "https://in03-ba4234asae.api.gcp-us-west1.zillizcloud.com", "user": "temp", "password": "temp", "token": "temp", # API key as replacements for user and password "secure": True }, drop_old=True, ) Raises: ValueError: If the pymilvus python package is not installed.
""" def _create_index(self) -> None: """Create a index on the collection""" from pymilvus import Collection, MilvusException if isinstance(self.col, Collection) and self._get_index() is None: try: # If no index params, use a default AutoIndex based one if self.index_params is None: self.index_params = { "metric_type": "L2", "index_type": "AUTOINDEX", "params": {}, } try: self.col.create_index( self._vector_field, index_params=self.index_params, using=self.alias, ) # If default did not work, most likely Milvus self-hosted except MilvusException: # Use HNSW based index self.index_params = { "metric_type": "L2", "index_type": "HNSW", "params": {"M": 8, "efConstruction": 64}, } self.col.create_index( self._vector_field, index_params=self.index_params, using=self.alias, ) logger.debug( "Successfully created an index on collection: %s", self.collection_name, ) except MilvusException as e: logger.error( "Failed to create an index on collection: %s", self.collection_name ) raise e @classmethod def from_texts( cls, texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, collection_name: str = "LangChainCollection", connection_args: Optional[Dict[str, Any]] = None, consistency_level: str = "Session", index_params: Optional[dict] = None, search_params: Optional[dict] = None, drop_old: bool = False, *, ids: Optional[List[str]] = None, auto_id: bool = False, **kwargs: Any, ) -> Zilliz: """Create a Zilliz collection, indexes it with HNSW, and insert data. Args: texts (List[str]): Text data. embedding (Embeddings): Embedding function. metadatas (Optional[List[dict]]): Metadata for each text if it exists. Defaults to None. collection_name (str, optional): Collection name to use. Defaults to "LangChainCollection". connection_args (dict[str, Any], optional): Connection args to use. Defaults to DEFAULT_MILVUS_CONNECTION. consistency_level (str, optional): Which consistency level to use. Defaults to "Session". index_params (Optional[dict], optional): Which index_params to use. Defaults to None. search_params (Optional[dict], optional): Which search params to use. Defaults to None. drop_old (Optional[bool], optional): Whether to drop the collection with that name if it exists. Defaults to False. ids (Optional[List[str]]): List of text ids. auto_id (bool): Whether to enable auto id for primary key. Defaults to False. If False, you needs to provide text ids (string less than 65535 bytes). If True, Milvus will generate unique integers as primary keys. Returns: Zilliz: Zilliz Vector Store """ vector_db = cls( embedding_function=embedding, collection_name=collection_name, connection_args=connection_args or {}, consistency_level=consistency_level, index_params=index_params, search_params=search_params, drop_old=drop_old, auto_id=auto_id, **kwargs, ) vector_db.add_texts(texts=texts, metadatas=metadatas, ids=ids) return vector_db
0
lc_public_repos/langchain/libs/community/langchain_community
lc_public_repos/langchain/libs/community/langchain_community/vectorstores/hippo.py
from __future__ import annotations import logging from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Tuple from langchain_core.documents import Document from langchain_core.embeddings import Embeddings from langchain_core.vectorstores import VectorStore if TYPE_CHECKING: from transwarp_hippo_api.hippo_client import HippoClient # Default connection DEFAULT_HIPPO_CONNECTION = { "host": "localhost", "port": "7788", "username": "admin", "password": "admin", } logger = logging.getLogger(__name__) class Hippo(VectorStore): """`Hippo` vector store. You need to install `hippo-api` and run Hippo. Please visit our official website for how to run a Hippo instance: https://www.transwarp.cn/starwarp Args: embedding_function (Embeddings): Function used to embed the text. table_name (str): Which Hippo table to use. Defaults to "test". database_name (str): Which Hippo database to use. Defaults to "default". number_of_shards (int): The number of shards for the Hippo table. Defaults to 1. number_of_replicas (int): The number of replicas for the Hippo table. Defaults to 1. connection_args (Optional[dict[str, any]]): The connection args used for this class come in the form of a dict. index_params (Optional[dict]): Which index params to use. Defaults to IVF_FLAT. drop_old (Optional[bool]): Whether to drop the current collection. Defaults to False. primary_field (str): Name of the primary key field. Defaults to "pk". text_field (str): Name of the text field. Defaults to "text". vector_field (str): Name of the vector field. Defaults to "vector". The connection args used for this class come in the form of a dict; here are a few of the options: host (str): The host of Hippo instance. Default at "localhost". port (str/int): The port of Hippo instance. Default at 7788. user (str): Use which user to connect to Hippo instance. If user and password are provided, we will add related header in every RPC call. password (str): Required when user is provided. The password corresponding to the user. Example: .. code-block:: python from langchain_community.vectorstores import Hippo from langchain_community.embeddings import OpenAIEmbeddings embedding = OpenAIEmbeddings() # Connect to a Hippo instance on localhost vector_store = Hippo.from_documents( docs, embedding=embedding, table_name="langchain_test", connection_args=HIPPO_CONNECTION ) Raises: ValueError: If the hippo-api python package is not installed.
""" def __init__( self, embedding_function: Embeddings, table_name: str = "test", database_name: str = "default", number_of_shards: int = 1, number_of_replicas: int = 1, connection_args: Optional[Dict[str, Any]] = None, index_params: Optional[dict] = None, drop_old: Optional[bool] = False, ): self.number_of_shards = number_of_shards self.number_of_replicas = number_of_replicas self.embedding_func = embedding_function self.table_name = table_name self.database_name = database_name self.index_params = index_params # In order for a collection to be compatible, # 'pk' should be an auto-increment primary key and string self._primary_field = "pk" # In order for compatibility, the text field will need to be called "text" self._text_field = "text" # In order for compatibility, the vector field needs to be called "vector" self._vector_field = "vector" self.fields: List[str] = [] # Create the connection to the server if connection_args is None: connection_args = DEFAULT_HIPPO_CONNECTION self.hc = self._create_connection_alias(connection_args) self.col: Any = None # If the collection exists, delete it try: if ( self.hc.check_table_exists(self.table_name, self.database_name) and drop_old ): self.hc.delete_table(self.table_name, self.database_name) except Exception as e: logging.error( f"An error occurred while deleting the table " f"{self.table_name}: {e}" ) raise try: if self.hc.check_table_exists(self.table_name, self.database_name): self.col = self.hc.get_table(self.table_name, self.database_name) except Exception as e: logging.error( f"An error occurred while getting the table " f"{self.table_name}: {e}" ) raise # Initialize the vector database self._get_env() def _create_connection_alias(self, connection_args: dict) -> HippoClient: """Create the connection to the Hippo server.""" # Grab the connection arguments that are used for checking existing connection try: from transwarp_hippo_api.hippo_client import HippoClient except ImportError as e: raise ImportError( "Unable to import transwarp_hipp_api, please install with " "`pip install hippo-api`." 
) from e host: str = connection_args.get("host", None) port: int = connection_args.get("port", None) username: str = connection_args.get("username", "shiva") password: str = connection_args.get("password", "shiva") # Order of use is host/port, uri, address if host is not None and port is not None: if "," in host: hosts = host.split(",") given_address = ",".join([f"{h}:{port}" for h in hosts]) else: given_address = str(host) + ":" + str(port) else: raise ValueError("Missing standard address type for reuse attempt") try: logger.info(f"create HippoClient[{given_address}]") return HippoClient([given_address], username=username, pwd=password) except Exception as e: logger.error("Failed to create new connection") raise e def _get_env( self, embeddings: Optional[list] = None, metadatas: Optional[List[dict]] = None ) -> None: logger.info("init ...") if embeddings is not None: logger.info("create collection") self._create_collection(embeddings, metadatas) self._extract_fields() self._create_index() def _create_collection( self, embeddings: list, metadatas: Optional[List[dict]] = None ) -> None: from transwarp_hippo_api.hippo_client import HippoField from transwarp_hippo_api.hippo_type import HippoType # Determine embedding dim dim = len(embeddings[0]) logger.debug(f"[_create_collection] dim: {dim}") fields = [] # Create the primary key field fields.append(HippoField(self._primary_field, True, HippoType.STRING)) # Create the text field fields.append(HippoField(self._text_field, False, HippoType.STRING)) # Create the vector field. Only float vectors are supported for now; # the binary vector type is yet to be developed. fields.append( HippoField( self._vector_field, False, HippoType.FLOAT_VECTOR, type_params={"dimension": dim}, ) ) # In Hippo, there is no method similar to infer_type_data, so currently # all non-vector data is converted to string type. if metadatas: # Create a field for each entry in the metadata. for key, value in metadatas[0].items(): # Infer the corresponding datatype of the metadata if isinstance(value, list): value_dim = len(value) fields.append( HippoField( key, False, HippoType.FLOAT_VECTOR, type_params={"dimension": value_dim}, ) ) else: fields.append(HippoField(key, False, HippoType.STRING)) logger.debug(f"[_create_collection] fields: {fields}") # Create the collection self.hc.create_table( name=self.table_name, auto_id=True, fields=fields, database_name=self.database_name, number_of_shards=self.number_of_shards, number_of_replicas=self.number_of_replicas, ) self.col = self.hc.get_table(self.table_name, self.database_name) logger.info( f"[_create_collection] : " f"create table {self.table_name} in {self.database_name} successfully" ) def _extract_fields(self) -> None: """Grab the existing fields from the Collection""" from transwarp_hippo_api.hippo_client import HippoTable if isinstance(self.col, HippoTable): schema = self.col.schema logger.debug(f"[_extract_fields] schema:{schema}") for x in schema: self.fields.append(x.name) logger.debug(f"04 [_extract_fields] fields:{self.fields}") # Currently, only the field named 'vector' (the automatically created # vector field) is checked for indexing. Indexes need to be created # manually for other vector type columns.
def _get_index(self) -> Optional[Dict[str, Any]]: """Return the vector index information if it exists""" from transwarp_hippo_api.hippo_client import HippoTable if isinstance(self.col, HippoTable): table_info = self.hc.get_table_info( self.table_name, self.database_name ).get(self.table_name, {}) embedding_indexes = table_info.get("embedding_indexes", None) if embedding_indexes is None: return None else: for x in self.hc.get_table_info(self.table_name, self.database_name)[ self.table_name ]["embedding_indexes"]: logger.debug(f"[_get_index] embedding_indexes {embedding_indexes}") if x["column"] == self._vector_field: return x return None # TO Indexes can only be created for the self._vector_field field. def _create_index(self) -> None: """Create a index on the collection""" from transwarp_hippo_api.hippo_client import HippoTable from transwarp_hippo_api.hippo_type import IndexType, MetricType if isinstance(self.col, HippoTable) and self._get_index() is None: if self._get_index() is None: if self.index_params is None: self.index_params = { "index_name": "langchain_auto_create", "metric_type": MetricType.L2, "index_type": IndexType.IVF_FLAT, "nlist": 10, } self.col.create_index( self._vector_field, self.index_params["index_name"], self.index_params["index_type"], self.index_params["metric_type"], nlist=self.index_params["nlist"], ) logger.debug( self.col.activate_index(self.index_params["index_name"]) ) logger.info("create index successfully") else: index_dict = { "IVF_FLAT": IndexType.IVF_FLAT, "FLAT": IndexType.FLAT, "IVF_SQ": IndexType.IVF_SQ, "IVF_PQ": IndexType.IVF_PQ, "HNSW": IndexType.HNSW, } metric_dict = { "ip": MetricType.IP, "IP": MetricType.IP, "l2": MetricType.L2, "L2": MetricType.L2, } self.index_params["metric_type"] = metric_dict[ self.index_params["metric_type"] ] if self.index_params["index_type"] == "FLAT": self.index_params["index_type"] = index_dict[ self.index_params["index_type"] ] self.col.create_index( self._vector_field, self.index_params["index_name"], self.index_params["index_type"], self.index_params["metric_type"], ) logger.debug( self.col.activate_index(self.index_params["index_name"]) ) elif ( self.index_params["index_type"] == "IVF_FLAT" or self.index_params["index_type"] == "IVF_SQ" ): self.index_params["index_type"] = index_dict[ self.index_params["index_type"] ] self.col.create_index( self._vector_field, self.index_params["index_name"], self.index_params["index_type"], self.index_params["metric_type"], nlist=self.index_params.get("nlist", 10), nprobe=self.index_params.get("nprobe", 10), ) logger.debug( self.col.activate_index(self.index_params["index_name"]) ) elif self.index_params["index_type"] == "IVF_PQ": self.index_params["index_type"] = index_dict[ self.index_params["index_type"] ] self.col.create_index( self._vector_field, self.index_params["index_name"], self.index_params["index_type"], self.index_params["metric_type"], nlist=self.index_params.get("nlist", 10), nprobe=self.index_params.get("nprobe", 10), nbits=self.index_params.get("nbits", 8), m=self.index_params.get("m"), ) logger.debug( self.col.activate_index(self.index_params["index_name"]) ) elif self.index_params["index_type"] == "HNSW": self.index_params["index_type"] = index_dict[ self.index_params["index_type"] ] self.col.create_index( self._vector_field, self.index_params["index_name"], self.index_params["index_type"], self.index_params["metric_type"], M=self.index_params.get("M"), ef_construction=self.index_params.get("ef_construction"), ef_search=self.index_params.get("ef_search"), ) 
logger.debug( self.col.activate_index(self.index_params["index_name"]) ) else: raise ValueError( "Index name does not match, " "please enter the correct index name. " "(FLAT, IVF_FLAT, IVF_PQ, IVF_SQ, HNSW)" ) def add_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, timeout: Optional[int] = None, batch_size: int = 1000, **kwargs: Any, ) -> List[str]: """ Add text to the collection. Args: texts: An iterable that contains the text to be added. metadatas: An optional list of dictionaries, each dictionary contains the metadata associated with a text. timeout: Optional timeout, in seconds. batch_size: The number of texts inserted in each batch, defaults to 1000. **kwargs: Other optional parameters. Returns: A list of strings, containing the unique identifiers of the inserted texts. Note: If the collection has not yet been created, this method will create a new collection. """ from transwarp_hippo_api.hippo_client import HippoTable if not texts or all(t == "" for t in texts): logger.debug("Nothing to insert, skipping.") return [] texts = list(texts) logger.debug(f"[add_texts] texts: {texts}") try: embeddings = self.embedding_func.embed_documents(texts) except NotImplementedError: embeddings = [self.embedding_func.embed_query(x) for x in texts] if len(embeddings) == 0: logger.debug("Nothing to insert, skipping.") return [] logger.debug(f"[add_texts] len_embeddings:{len(embeddings)}") # Create the collection if it has not been created yet. if not isinstance(self.col, HippoTable): self._get_env(embeddings, metadatas) # Dict to hold all insert columns insert_dict: Dict[str, list] = { self._text_field: texts, self._vector_field: embeddings, } logger.debug(f"[add_texts] metadatas:{metadatas}") logger.debug(f"[add_texts] fields:{self.fields}") if metadatas is not None: for d in metadatas: for key, value in d.items(): if key in self.fields: insert_dict.setdefault(key, []).append(value) logger.debug(insert_dict[self._text_field]) # Total insert count vectors: list = insert_dict[self._vector_field] total_count = len(vectors) if "pk" in self.fields: self.fields.remove("pk") logger.debug(f"[add_texts] total_count:{total_count}") for i in range(0, total_count, batch_size): # Grab end index end = min(i + batch_size, total_count) # Convert dict to list of lists batch for insertion insert_list = [insert_dict[x][i:end] for x in self.fields] try: res = self.col.insert_rows(insert_list) logger.info(f"05 [add_texts] insert {res}") except Exception as e: logger.error( "Failed to insert batch starting at entity: %s/%s", i, total_count ) raise e return [""] def similarity_search( self, query: str, k: int = 4, param: Optional[dict] = None, expr: Optional[str] = None, timeout: Optional[int] = None, **kwargs: Any, ) -> List[Document]: """ Perform a similarity search on the query string. Args: query (str): The text to search for. k (int, optional): The number of results to return. Default is 4. param (dict, optional): Specifies the search parameters for the index. Defaults to None. expr (str, optional): Filtering expression. Defaults to None. timeout (int, optional): Time to wait before a timeout error. Defaults to None. kwargs: Keyword arguments for Collection.search(). Returns: List[Document]: The document results of the search.
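Example:
    .. code-block:: python

        # Editor's illustrative sketch: `vector_store` is a Hippo
        # instance created as in the class-level example above.
        docs = vector_store.similarity_search("machine learning", k=4)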
""" if self.col is None: logger.debug("No existing collection to search.") return [] res = self.similarity_search_with_score( query=query, k=k, param=param, expr=expr, timeout=timeout, **kwargs ) return [doc for doc, _ in res] def similarity_search_with_score( self, query: str, k: int = 4, param: Optional[dict] = None, expr: Optional[str] = None, timeout: Optional[int] = None, **kwargs: Any, ) -> List[Tuple[Document, float]]: """ Performs a search on the query string and returns results with scores. Args: query (str): The text being searched. k (int, optional): The number of results to return. Default is 4. param (dict): Specifies the search parameters for the index. Default is None. expr (str, optional): Filtering expression. Default is None. timeout (int, optional): The waiting time before a timeout error. Default is None. kwargs: Keyword arguments for Collection.search(). Returns: List[float], List[Tuple[Document, any, any]]: """ if self.col is None: logger.debug("No existing collection to search.") return [] # Embed the query text. embedding = self.embedding_func.embed_query(query) ret = self.similarity_search_with_score_by_vector( embedding=embedding, k=k, param=param, expr=expr, timeout=timeout, **kwargs ) return ret def similarity_search_with_score_by_vector( self, embedding: List[float], k: int = 4, param: Optional[dict] = None, expr: Optional[str] = None, timeout: Optional[int] = None, **kwargs: Any, ) -> List[Tuple[Document, float]]: """ Performs a search on the query string and returns results with scores. Args: embedding (List[float]): The embedding vector being searched. k (int, optional): The number of results to return. Default is 4. param (dict): Specifies the search parameters for the index. Default is None. expr (str, optional): Filtering expression. Default is None. timeout (int, optional): The waiting time before a timeout error. Default is None. kwargs: Keyword arguments for Collection.search(). Returns: List[Tuple[Document, float]]: Resulting documents and scores. """ if self.col is None: logger.debug("No existing collection to search.") return [] # if param is None: # param = self.search_params # Determine result metadata fields. output_fields = self.fields[:] output_fields.remove(self._vector_field) # Perform the search. logger.debug(f"search_field:{self._vector_field}") logger.debug(f"vectors:{[embedding]}") logger.debug(f"output_fields:{output_fields}") logger.debug(f"topk:{k}") logger.debug(f"dsl:{expr}") res = self.col.query( search_field=self._vector_field, vectors=[embedding], output_fields=output_fields, topk=k, dsl=expr, ) # Organize results. 
logger.debug(f"[similarity_search_with_score_by_vector] res:{res}") score_col = self._text_field + "%scores" ret = [] count = 0 for items in zip(*[res[0][field] for field in output_fields]): meta = {field: value for field, value in zip(output_fields, items)} doc = Document(page_content=meta.pop(self._text_field), metadata=meta) logger.debug( f"[similarity_search_with_score_by_vector] " f"res[0][score_col]:{res[0][score_col]}" ) score = res[0][score_col][count] count += 1 ret.append((doc, score)) return ret @classmethod def from_texts( cls, texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, table_name: str = "test", database_name: str = "default", connection_args: Dict[str, Any] = DEFAULT_HIPPO_CONNECTION, index_params: Optional[Dict[Any, Any]] = None, search_params: Optional[Dict[str, Any]] = None, drop_old: bool = False, **kwargs: Any, ) -> "Hippo": """ Creates an instance of the VST class from the given texts. Args: texts (List[str]): List of texts to be added. embedding (Embeddings): Embedding model for the texts. metadatas (List[dict], optional): List of metadata dictionaries for each text.Defaults to None. table_name (str): Name of the table. Defaults to "test". database_name (str): Name of the database. Defaults to "default". connection_args (dict[str, Any]): Connection parameters. Defaults to DEFAULT_HIPPO_CONNECTION. index_params (dict): Indexing parameters. Defaults to None. search_params (dict): Search parameters. Defaults to an empty dictionary. drop_old (bool): Whether to drop the old collection. Defaults to False. kwargs: Other arguments. Returns: Hippo: An instance of the VST class. """ if search_params is None: search_params = {} logger.info("00 [from_texts] init the class of Hippo") vector_db = cls( embedding_function=embedding, table_name=table_name, database_name=database_name, connection_args=connection_args, index_params=index_params, drop_old=drop_old, **kwargs, ) logger.debug(f"[from_texts] texts:{texts}") logger.debug(f"[from_texts] metadatas:{metadatas}") vector_db.add_texts(texts=texts, metadatas=metadatas) return vector_db
0
lc_public_repos/langchain/libs/community/langchain_community
lc_public_repos/langchain/libs/community/langchain_community/vectorstores/chroma.py
from __future__ import annotations import base64 import logging import uuid from typing import ( TYPE_CHECKING, Any, Callable, Dict, Iterable, List, Optional, Tuple, Type, ) import numpy as np from langchain_core._api import deprecated from langchain_core.documents import Document from langchain_core.embeddings import Embeddings from langchain_core.utils import xor_args from langchain_core.vectorstores import VectorStore from langchain_community.vectorstores.utils import maximal_marginal_relevance if TYPE_CHECKING: import chromadb import chromadb.config from chromadb.api.types import ID, OneOrMany, Where, WhereDocument logger = logging.getLogger() DEFAULT_K = 4 # Number of Documents to return. def _results_to_docs(results: Any) -> List[Document]: return [doc for doc, _ in _results_to_docs_and_scores(results)] def _results_to_docs_and_scores(results: Any) -> List[Tuple[Document, float]]: return [ # TODO: Chroma can do batch querying, # we shouldn't hard code to the 1st result (Document(page_content=result[0], metadata=result[1] or {}), result[2]) for result in zip( results["documents"][0], results["metadatas"][0], results["distances"][0], ) ] @deprecated(since="0.2.9", removal="1.0", alternative_import="langchain_chroma.Chroma") class Chroma(VectorStore): """`ChromaDB` vector store. To use, you should have the ``chromadb`` python package installed. Example: .. code-block:: python from langchain_community.vectorstores import Chroma from langchain_community.embeddings.openai import OpenAIEmbeddings embeddings = OpenAIEmbeddings() vectorstore = Chroma("langchain_store", embeddings) """ _LANGCHAIN_DEFAULT_COLLECTION_NAME: str = "langchain" def __init__( self, collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME, embedding_function: Optional[Embeddings] = None, persist_directory: Optional[str] = None, client_settings: Optional[chromadb.config.Settings] = None, collection_metadata: Optional[Dict] = None, client: Optional[chromadb.Client] = None, # type: ignore[valid-type] relevance_score_fn: Optional[Callable[[float], float]] = None, ) -> None: """Initialize with a Chroma client.""" try: import chromadb import chromadb.config except ImportError: raise ImportError( "Could not import chromadb python package. " "Please install it with `pip install chromadb`." ) if client is not None: self._client_settings = client_settings self._client = client self._persist_directory = persist_directory else: if client_settings: # If client_settings is provided with persist_directory specified, # then it is "in-memory and persisting to disk" mode. 
client_settings.persist_directory = ( persist_directory or client_settings.persist_directory ) if client_settings.persist_directory is not None: # Maintain backwards compatibility with chromadb < 0.4.0 major, minor, _ = chromadb.__version__.split(".") if int(major) == 0 and int(minor) < 4: client_settings.chroma_db_impl = "duckdb+parquet" _client_settings = client_settings elif persist_directory: # Maintain backwards compatibility with chromadb < 0.4.0 major, minor, _ = chromadb.__version__.split(".") if int(major) == 0 and int(minor) < 4: _client_settings = chromadb.config.Settings( chroma_db_impl="duckdb+parquet", ) else: _client_settings = chromadb.config.Settings(is_persistent=True) _client_settings.persist_directory = persist_directory else: _client_settings = chromadb.config.Settings() self._client_settings = _client_settings # type: ignore[has-type] self._client = chromadb.Client(_client_settings) # type: ignore[has-type] self._persist_directory = ( # type: ignore[has-type] _client_settings.persist_directory or persist_directory ) self._embedding_function = embedding_function self._collection = self._client.get_or_create_collection( # type: ignore[has-type] name=collection_name, embedding_function=None, metadata=collection_metadata, ) self.override_relevance_score_fn = relevance_score_fn @property def embeddings(self) -> Optional[Embeddings]: return self._embedding_function @xor_args(("query_texts", "query_embeddings")) def __query_collection( self, query_texts: Optional[List[str]] = None, query_embeddings: Optional[List[List[float]]] = None, n_results: int = 4, where: Optional[Dict[str, str]] = None, where_document: Optional[Dict[str, str]] = None, **kwargs: Any, ) -> List[Document]: """Query the chroma collection.""" try: import chromadb # noqa: F401 except ImportError: raise ImportError( "Could not import chromadb python package. " "Please install it with `pip install chromadb`." ) return self._collection.query( # type: ignore[return-value] query_texts=query_texts, query_embeddings=query_embeddings, # type: ignore[arg-type] n_results=n_results, where=where, # type: ignore[arg-type] where_document=where_document, # type: ignore[arg-type] **kwargs, ) def encode_image(self, uri: str) -> str: """Get base64 string from image URI.""" with open(uri, "rb") as image_file: return base64.b64encode(image_file.read()).decode("utf-8") def add_images( self, uris: List[str], metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, **kwargs: Any, ) -> List[str]: """Run more images through the embeddings and add to the vectorstore. Args: uris List[str]: File path to the image. metadatas (Optional[List[dict]], optional): Optional list of metadatas. ids (Optional[List[str]], optional): Optional list of IDs. Returns: List[str]: List of IDs of the added images. 
""" # Map from uris to b64 encoded strings b64_texts = [self.encode_image(uri=uri) for uri in uris] # Populate IDs if ids is None: ids = [str(uuid.uuid4()) for _ in uris] embeddings = None # Set embeddings if self._embedding_function is not None and hasattr( self._embedding_function, "embed_image" ): embeddings = self._embedding_function.embed_image(uris=uris) if metadatas: # fill metadatas with empty dicts if somebody # did not specify metadata for all images length_diff = len(uris) - len(metadatas) if length_diff: metadatas = metadatas + [{}] * length_diff empty_ids = [] non_empty_ids = [] for idx, m in enumerate(metadatas): if m: non_empty_ids.append(idx) else: empty_ids.append(idx) if non_empty_ids: metadatas = [metadatas[idx] for idx in non_empty_ids] images_with_metadatas = [b64_texts[idx] for idx in non_empty_ids] embeddings_with_metadatas = ( [embeddings[idx] for idx in non_empty_ids] if embeddings else None ) ids_with_metadata = [ids[idx] for idx in non_empty_ids] try: self._collection.upsert( metadatas=metadatas, # type: ignore[arg-type] embeddings=embeddings_with_metadatas, documents=images_with_metadatas, ids=ids_with_metadata, ) except ValueError as e: if "Expected metadata value to be" in str(e): msg = ( "Try filtering complex metadata using " "langchain_community.vectorstores.utils.filter_complex_metadata." ) raise ValueError(e.args[0] + "\n\n" + msg) else: raise e if empty_ids: images_without_metadatas = [b64_texts[j] for j in empty_ids] embeddings_without_metadatas = ( [embeddings[j] for j in empty_ids] if embeddings else None ) ids_without_metadatas = [ids[j] for j in empty_ids] self._collection.upsert( embeddings=embeddings_without_metadatas, documents=images_without_metadatas, ids=ids_without_metadatas, ) else: self._collection.upsert( embeddings=embeddings, documents=b64_texts, ids=ids, ) return ids def add_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, **kwargs: Any, ) -> List[str]: """Run more texts through the embeddings and add to the vectorstore. Args: texts (Iterable[str]): Texts to add to the vectorstore. metadatas (Optional[List[dict]], optional): Optional list of metadatas. ids (Optional[List[str]], optional): Optional list of IDs. Returns: List[str]: List of IDs of the added texts. 
""" # TODO: Handle the case where the user doesn't provide ids on the Collection if ids is None: ids = [str(uuid.uuid4()) for _ in texts] embeddings = None texts = list(texts) if self._embedding_function is not None: embeddings = self._embedding_function.embed_documents(texts) if metadatas: # fill metadatas with empty dicts if somebody # did not specify metadata for all texts length_diff = len(texts) - len(metadatas) if length_diff: metadatas = metadatas + [{}] * length_diff empty_ids = [] non_empty_ids = [] for idx, m in enumerate(metadatas): if m: non_empty_ids.append(idx) else: empty_ids.append(idx) if non_empty_ids: metadatas = [metadatas[idx] for idx in non_empty_ids] texts_with_metadatas = [texts[idx] for idx in non_empty_ids] embeddings_with_metadatas = ( [embeddings[idx] for idx in non_empty_ids] if embeddings else None ) ids_with_metadata = [ids[idx] for idx in non_empty_ids] try: self._collection.upsert( metadatas=metadatas, # type: ignore[arg-type] embeddings=embeddings_with_metadatas, # type: ignore[arg-type] documents=texts_with_metadatas, ids=ids_with_metadata, ) except ValueError as e: if "Expected metadata value to be" in str(e): msg = ( "Try filtering complex metadata from the document using " "langchain_community.vectorstores.utils.filter_complex_metadata." ) raise ValueError(e.args[0] + "\n\n" + msg) else: raise e if empty_ids: texts_without_metadatas = [texts[j] for j in empty_ids] embeddings_without_metadatas = ( [embeddings[j] for j in empty_ids] if embeddings else None ) ids_without_metadatas = [ids[j] for j in empty_ids] self._collection.upsert( embeddings=embeddings_without_metadatas, # type: ignore[arg-type] documents=texts_without_metadatas, ids=ids_without_metadatas, ) else: self._collection.upsert( embeddings=embeddings, # type: ignore[arg-type] documents=texts, ids=ids, ) return ids def similarity_search( self, query: str, k: int = DEFAULT_K, filter: Optional[Dict[str, str]] = None, **kwargs: Any, ) -> List[Document]: """Run similarity search with Chroma. Args: query (str): Query text to search for. k (int): Number of results to return. Defaults to 4. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. Returns: List[Document]: List of documents most similar to the query text. """ docs_and_scores = self.similarity_search_with_score( query, k, filter=filter, **kwargs ) return [doc for doc, _ in docs_and_scores] def similarity_search_by_vector( self, embedding: List[float], k: int = DEFAULT_K, filter: Optional[Dict[str, str]] = None, where_document: Optional[Dict[str, str]] = None, **kwargs: Any, ) -> List[Document]: """Return docs most similar to embedding vector. Args: embedding (List[float]): Embedding to look up documents similar to. k (int): Number of Documents to return. Defaults to 4. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. Returns: List of Documents most similar to the query vector. """ results = self.__query_collection( query_embeddings=embedding, n_results=k, where=filter, where_document=where_document, **kwargs, ) return _results_to_docs(results) def similarity_search_by_vector_with_relevance_scores( self, embedding: List[float], k: int = DEFAULT_K, filter: Optional[Dict[str, str]] = None, where_document: Optional[Dict[str, str]] = None, **kwargs: Any, ) -> List[Tuple[Document, float]]: """ Return docs most similar to embedding vector and similarity score. Args: embedding (List[float]): Embedding to look up documents similar to. k (int): Number of Documents to return. Defaults to 4. 
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. Returns: List[Tuple[Document, float]]: List of documents most similar to the query text and cosine distance in float for each. Lower score represents more similarity. """ results = self.__query_collection( query_embeddings=embedding, n_results=k, where=filter, where_document=where_document, **kwargs, ) return _results_to_docs_and_scores(results) def similarity_search_with_score( self, query: str, k: int = DEFAULT_K, filter: Optional[Dict[str, str]] = None, where_document: Optional[Dict[str, str]] = None, **kwargs: Any, ) -> List[Tuple[Document, float]]: """Run similarity search with Chroma with distance. Args: query (str): Query text to search for. k (int): Number of results to return. Defaults to 4. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. Returns: List[Tuple[Document, float]]: List of documents most similar to the query text and cosine distance in float for each. Lower score represents more similarity. """ if self._embedding_function is None: results = self.__query_collection( query_texts=[query], n_results=k, where=filter, where_document=where_document, **kwargs, ) else: query_embedding = self._embedding_function.embed_query(query) results = self.__query_collection( query_embeddings=[query_embedding], n_results=k, where=filter, where_document=where_document, **kwargs, ) return _results_to_docs_and_scores(results) def _select_relevance_score_fn(self) -> Callable[[float], float]: """ The 'correct' relevance function may differ depending on a few things, including: - the distance / similarity metric used by the VectorStore - the scale of your embeddings (OpenAI's are unit normed. Many others are not!) - embedding dimensionality - etc. """ if self.override_relevance_score_fn: return self.override_relevance_score_fn distance = "l2" distance_key = "hnsw:space" metadata = self._collection.metadata if metadata and distance_key in metadata: distance = metadata[distance_key] if distance == "cosine": return self._cosine_relevance_score_fn elif distance == "l2": return self._euclidean_relevance_score_fn elif distance == "ip": return self._max_inner_product_relevance_score_fn else: raise ValueError( "No supported normalization function" f" for distance metric of type: {distance}." "Consider providing relevance_score_fn to Chroma constructor." ) def similarity_search_by_image( self, uri: str, k: int = DEFAULT_K, filter: Optional[Dict[str, str]] = None, **kwargs: Any, ) -> List[Document]: """Search for similar images based on the given image URI. Args: uri (str): URI of the image to search for. k (int, optional): Number of results to return. Defaults to DEFAULT_K. filter (Optional[Dict[str, str]], optional): Filter by metadata. **kwargs (Any): Additional arguments to pass to function. Returns: List of Images most similar to the provided image. Each element in list is a Langchain Document Object. The page content is b64 encoded image, metadata is default or as defined by user. Raises: ValueError: If the embedding function does not support image embeddings. 
""" if self._embedding_function is None or not hasattr( self._embedding_function, "embed_image" ): raise ValueError("The embedding function must support image embedding.") # Obtain image embedding # Assuming embed_image returns a single embedding image_embedding = self._embedding_function.embed_image(uris=[uri]) # Perform similarity search based on the obtained embedding results = self.similarity_search_by_vector( embedding=image_embedding, k=k, filter=filter, **kwargs, ) return results def similarity_search_by_image_with_relevance_score( self, uri: str, k: int = DEFAULT_K, filter: Optional[Dict[str, str]] = None, **kwargs: Any, ) -> List[Tuple[Document, float]]: """Search for similar images based on the given image URI. Args: uri (str): URI of the image to search for. k (int, optional): Number of results to return. Defaults to DEFAULT_K. filter (Optional[Dict[str, str]], optional): Filter by metadata. **kwargs (Any): Additional arguments to pass to function. Returns: List[Tuple[Document, float]]: List of tuples containing documents similar to the query image and their similarity scores. 0th element in each tuple is a Langchain Document Object. The page content is b64 encoded img, metadata is default or defined by user. Raises: ValueError: If the embedding function does not support image embeddings. """ if self._embedding_function is None or not hasattr( self._embedding_function, "embed_image" ): raise ValueError("The embedding function must support image embedding.") # Obtain image embedding # Assuming embed_image returns a single embedding image_embedding = self._embedding_function.embed_image(uris=[uri]) # Perform similarity search based on the obtained embedding results = self.similarity_search_by_vector_with_relevance_scores( embedding=image_embedding, k=k, filter=filter, **kwargs, ) return results def max_marginal_relevance_search_by_vector( self, embedding: List[float], k: int = DEFAULT_K, fetch_k: int = 20, lambda_mult: float = 0.5, filter: Optional[Dict[str, str]] = None, where_document: Optional[Dict[str, str]] = None, **kwargs: Any, ) -> List[Document]: """Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. fetch_k: Number of Documents to fetch to pass to MMR algorithm. lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. Returns: List of Documents selected by maximal marginal relevance. """ results = self.__query_collection( query_embeddings=embedding, n_results=fetch_k, where=filter, where_document=where_document, include=["metadatas", "documents", "distances", "embeddings"], **kwargs, ) mmr_selected = maximal_marginal_relevance( np.array(embedding, dtype=np.float32), results["embeddings"][0], k=k, lambda_mult=lambda_mult, ) candidates = _results_to_docs(results) selected_results = [r for i, r in enumerate(candidates) if i in mmr_selected] return selected_results def max_marginal_relevance_search( self, query: str, k: int = DEFAULT_K, fetch_k: int = 20, lambda_mult: float = 0.5, filter: Optional[Dict[str, str]] = None, where_document: Optional[Dict[str, str]] = None, **kwargs: Any, ) -> List[Document]: """Return docs selected using the maximal marginal relevance. 
Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. fetch_k: Number of Documents to fetch to pass to MMR algorithm. lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. Returns: List of Documents selected by maximal marginal relevance. """ if self._embedding_function is None: raise ValueError( "For MMR search, you must specify an embedding function on" "creation." ) embedding = self._embedding_function.embed_query(query) docs = self.max_marginal_relevance_search_by_vector( embedding, k, fetch_k, lambda_mult=lambda_mult, filter=filter, where_document=where_document, ) return docs def delete_collection(self) -> None: """Delete the collection.""" self._client.delete_collection(self._collection.name) # type: ignore[has-type] def get( self, ids: Optional[OneOrMany[ID]] = None, where: Optional[Where] = None, limit: Optional[int] = None, offset: Optional[int] = None, where_document: Optional[WhereDocument] = None, include: Optional[List[str]] = None, ) -> Dict[str, Any]: """Gets the collection. Args: ids: The ids of the embeddings to get. Optional. where: A Where type dict used to filter results by. E.g. `{"color" : "red", "price": 4.20}`. Optional. limit: The number of documents to return. Optional. offset: The offset to start returning results from. Useful for paging results with limit. Optional. where_document: A WhereDocument type dict used to filter by the documents. E.g. `{$contains: "hello"}`. Optional. include: A list of what to include in the results. Can contain `"embeddings"`, `"metadatas"`, `"documents"`. Ids are always included. Defaults to `["metadatas", "documents"]`. Optional. """ kwargs = { "ids": ids, "where": where, "limit": limit, "offset": offset, "where_document": where_document, } if include is not None: kwargs["include"] = include return self._collection.get(**kwargs) # type: ignore[return-value, arg-type, arg-type, arg-type, arg-type, arg-type] @deprecated( since="0.1.17", message=( "Since Chroma 0.4.x the manual persistence method is no longer " "supported as docs are automatically persisted." ), removal="1.0", ) def persist(self) -> None: """Persist the collection. This can be used to explicitly persist the data to disk. It will also be called automatically when the object is destroyed. Since Chroma 0.4.x the manual persistence method is no longer supported as docs are automatically persisted. """ if self._persist_directory is None: # type: ignore[has-type] raise ValueError( "You must specify a persist_directory on" "creation to persist the collection." ) import chromadb # Maintain backwards compatibility with chromadb < 0.4.0 major, minor, _ = chromadb.__version__.split(".") if int(major) == 0 and int(minor) < 4: self._client.persist() # type: ignore[has-type] def update_document(self, document_id: str, document: Document) -> None: """Update a document in the collection. Args: document_id (str): ID of the document to update. document (Document): Document to update. """ return self.update_documents([document_id], [document]) def update_documents(self, ids: List[str], documents: List[Document]) -> None: """Update a document in the collection. Args: ids (List[str]): List of ids of the document to update. 
            documents (List[Document]): List of documents to update.
        """
        text = [document.page_content for document in documents]
        metadata = [document.metadata for document in documents]
        if self._embedding_function is None:
            raise ValueError(
                "For update, you must specify an embedding function on creation."
            )
        embeddings = self._embedding_function.embed_documents(text)

        if hasattr(
            self._collection._client,
            "get_max_batch_size",  # for Chroma 0.5.1 and above
        ) or hasattr(
            self._collection._client, "max_batch_size"
        ):  # for Chroma 0.4.10 and above
            from chromadb.utils.batch_utils import create_batches

            for batch in create_batches(
                api=self._collection._client,
                ids=ids,
                metadatas=metadata,  # type: ignore[arg-type]
                documents=text,
                embeddings=embeddings,  # type: ignore[arg-type]
            ):
                self._collection.update(
                    ids=batch[0],
                    embeddings=batch[1],
                    documents=batch[3],
                    metadatas=batch[2],
                )
        else:
            self._collection.update(
                ids=ids,
                embeddings=embeddings,  # type: ignore[arg-type]
                documents=text,
                metadatas=metadata,  # type: ignore[arg-type]
            )

    @classmethod
    def from_texts(
        cls: Type[Chroma],
        texts: List[str],
        embedding: Optional[Embeddings] = None,
        metadatas: Optional[List[dict]] = None,
        ids: Optional[List[str]] = None,
        collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME,
        persist_directory: Optional[str] = None,
        client_settings: Optional[chromadb.config.Settings] = None,
        client: Optional[chromadb.Client] = None,  # type: ignore[valid-type]
        collection_metadata: Optional[Dict] = None,
        **kwargs: Any,
    ) -> Chroma:
        """Create a Chroma vectorstore from a list of raw texts.

        If a persist_directory is specified, the collection will be persisted
        there. Otherwise, the data will be ephemeral in-memory.

        Args:
            texts (List[str]): List of texts to add to the collection.
            collection_name (str): Name of the collection to create.
            persist_directory (Optional[str]): Directory to persist the collection.
            embedding (Optional[Embeddings]): Embedding function. Defaults to None.
            metadatas (Optional[List[dict]]): List of metadatas. Defaults to None.
            ids (Optional[List[str]]): List of document IDs. Defaults to None.
            client_settings (Optional[chromadb.config.Settings]): Chroma client
                settings
            collection_metadata (Optional[Dict]): Collection configurations.
                Defaults to None.

        Returns:
            Chroma: Chroma vectorstore.
        """
        chroma_collection = cls(
            collection_name=collection_name,
            embedding_function=embedding,
            persist_directory=persist_directory,
            client_settings=client_settings,
            client=client,
            collection_metadata=collection_metadata,
            **kwargs,
        )
        if ids is None:
            ids = [str(uuid.uuid4()) for _ in texts]
        if hasattr(
            chroma_collection._client,  # type: ignore[has-type]
            "get_max_batch_size",  # for Chroma 0.5.1 and above
        ) or hasattr(
            chroma_collection._client,  # type: ignore[has-type]
            "max_batch_size",
        ):  # for Chroma 0.4.10 and above
            from chromadb.utils.batch_utils import create_batches

            for batch in create_batches(
                api=chroma_collection._client,  # type: ignore[has-type]
                ids=ids,
                metadatas=metadatas,  # type: ignore[arg-type]
                documents=texts,
            ):
                chroma_collection.add_texts(
                    texts=batch[3] if batch[3] else [],
                    metadatas=batch[2] if batch[2] else None,  # type: ignore[arg-type]
                    ids=batch[0],
                )
        else:
            chroma_collection.add_texts(texts=texts, metadatas=metadatas, ids=ids)
        return chroma_collection

    @classmethod
    def from_documents(
        cls: Type[Chroma],
        documents: List[Document],
        embedding: Optional[Embeddings] = None,
        ids: Optional[List[str]] = None,
        collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME,
        persist_directory: Optional[str] = None,
        client_settings: Optional[chromadb.config.Settings] = None,
        client: Optional[chromadb.Client] = None,  # type: ignore[valid-type]
        collection_metadata: Optional[Dict] = None,
        **kwargs: Any,
    ) -> Chroma:
        """Create a Chroma vectorstore from a list of documents.

        If a persist_directory is specified, the collection will be persisted
        there. Otherwise, the data will be ephemeral in-memory.

        Args:
            collection_name (str): Name of the collection to create.
            persist_directory (Optional[str]): Directory to persist the collection.
            ids (Optional[List[str]]): List of document IDs. Defaults to None.
            documents (List[Document]): List of documents to add to the vectorstore.
            embedding (Optional[Embeddings]): Embedding function. Defaults to None.
            client_settings (Optional[chromadb.config.Settings]): Chroma client
                settings
            collection_metadata (Optional[Dict]): Collection configurations.
                Defaults to None.

        Returns:
            Chroma: Chroma vectorstore.
        """
        texts = [doc.page_content for doc in documents]
        metadatas = [doc.metadata for doc in documents]
        return cls.from_texts(
            texts=texts,
            embedding=embedding,
            metadatas=metadatas,
            ids=ids,
            collection_name=collection_name,
            persist_directory=persist_directory,
            client_settings=client_settings,
            client=client,
            collection_metadata=collection_metadata,
            **kwargs,
        )

    def delete(self, ids: Optional[List[str]] = None, **kwargs: Any) -> None:
        """Delete by vector IDs.

        Args:
            ids: List of ids to delete.
        """
        self._collection.delete(ids=ids, **kwargs)

    def __len__(self) -> int:
        """Count the number of documents in the collection."""
        return self._collection.count()
0
lc_public_repos/langchain/libs/community/langchain_community
lc_public_repos/langchain/libs/community/langchain_community/vectorstores/typesense.py
from __future__ import annotations

import uuid
from typing import TYPE_CHECKING, Any, Iterable, List, Optional, Tuple, Union

from langchain_core.documents import Document
from langchain_core.embeddings import Embeddings
from langchain_core.utils import get_from_env
from langchain_core.vectorstores import VectorStore

if TYPE_CHECKING:
    from typesense.client import Client
    from typesense.collection import Collection


class Typesense(VectorStore):
    """`Typesense` vector store.

    To use, you should have the ``typesense`` python package installed.

    Example:
        .. code-block:: python

            from langchain_community.embeddings.openai import OpenAIEmbeddings
            from langchain_community.vectorstores import Typesense
            import typesense

            node = {
                "host": "localhost",  # For Typesense Cloud use xxx.a1.typesense.net
                "port": "8108",       # For Typesense Cloud use 443
                "protocol": "http",   # For Typesense Cloud use https
            }
            typesense_client = typesense.Client(
                {
                    "nodes": [node],
                    "api_key": "<API_KEY>",
                    "connection_timeout_seconds": 2,
                }
            )
            typesense_collection_name = "langchain-memory"

            embedding = OpenAIEmbeddings()
            vectorstore = Typesense(
                typesense_client=typesense_client,
                embedding=embedding,
                typesense_collection_name=typesense_collection_name,
                text_key="text",
            )
    """

    def __init__(
        self,
        typesense_client: Client,
        embedding: Embeddings,
        *,
        typesense_collection_name: Optional[str] = None,
        text_key: str = "text",
    ):
        """Initialize with Typesense client."""
        try:
            from typesense import Client
        except ImportError:
            raise ImportError(
                "Could not import typesense python package. "
                "Please install it with `pip install typesense`."
            )
        if not isinstance(typesense_client, Client):
            raise ValueError(
                f"typesense_client should be an instance of typesense.Client, "
                f"got {type(typesense_client)}"
            )
        self._typesense_client = typesense_client
        self._embedding = embedding
        self._typesense_collection_name = (
            typesense_collection_name or f"langchain-{str(uuid.uuid4())}"
        )
        self._text_key = text_key

    @property
    def _collection(self) -> Collection:
        return self._typesense_client.collections[self._typesense_collection_name]

    @property
    def embeddings(self) -> Embeddings:
        return self._embedding

    def _prep_texts(
        self,
        texts: Iterable[str],
        metadatas: Optional[List[dict]],
        ids: Optional[List[str]],
    ) -> List[dict]:
        """Embed and create the documents"""
        _ids = ids or (str(uuid.uuid4()) for _ in texts)
        _metadatas: Iterable[dict] = metadatas or ({} for _ in texts)
        embedded_texts = self._embedding.embed_documents(list(texts))
        return [
            {"id": _id, "vec": vec, f"{self._text_key}": text, "metadata": metadata}
            for _id, vec, text, metadata in zip(
                _ids, embedded_texts, texts, _metadatas
            )
        ]

    def _create_collection(self, num_dim: int) -> None:
        fields = [
            {"name": "vec", "type": "float[]", "num_dim": num_dim},
            {"name": f"{self._text_key}", "type": "string"},
            {"name": ".*", "type": "auto"},
        ]
        self._typesense_client.collections.create(
            {"name": self._typesense_collection_name, "fields": fields}
        )

    def add_texts(
        self,
        texts: Iterable[str],
        metadatas: Optional[List[dict]] = None,
        ids: Optional[List[str]] = None,
        **kwargs: Any,
    ) -> List[str]:
        """Run more texts through the embedding and add to the vectorstore.

        Args:
            texts: Iterable of strings to add to the vectorstore.
            metadatas: Optional list of metadatas associated with the texts.
            ids: Optional list of ids to associate with the texts.

        Returns:
            List of ids from adding the texts into the vectorstore.
        """
        from typesense.exceptions import ObjectNotFound

        docs = self._prep_texts(texts, metadatas, ids)
        try:
            self._collection.documents.import_(docs, {"action": "upsert"})
        except ObjectNotFound:
            # Create the collection if it doesn't already exist
            self._create_collection(len(docs[0]["vec"]))
            self._collection.documents.import_(docs, {"action": "upsert"})
        return [doc["id"] for doc in docs]

    def similarity_search_with_score(
        self,
        query: str,
        k: int = 10,
        filter: Optional[str] = "",
    ) -> List[Tuple[Document, float]]:
        """Return typesense documents most similar to query, along with scores.

        Args:
            query: Text to look up documents similar to.
            k: Number of Documents to return. Defaults to 10.
                Note that Typesense returns a minimum of 10 results.
            filter: typesense filter_by expression to filter documents on

        Returns:
            List of Documents most similar to the query and score for each
        """
        embedded_query = [str(x) for x in self._embedding.embed_query(query)]
        query_obj = {
            "q": "*",
            "vector_query": f'vec:([{",".join(embedded_query)}], k:{k})',
            "filter_by": filter,
            "collection": self._typesense_collection_name,
        }
        docs = []
        response = self._typesense_client.multi_search.perform(
            {"searches": [query_obj]}, {}
        )
        for hit in response["results"][0]["hits"]:
            document = hit["document"]
            metadata = document["metadata"]
            text = document[self._text_key]
            score = hit["vector_distance"]
            docs.append((Document(page_content=text, metadata=metadata), score))
        return docs

    def similarity_search(
        self,
        query: str,
        k: int = 10,
        filter: Optional[str] = "",
        **kwargs: Any,
    ) -> List[Document]:
        """Return typesense documents most similar to query.

        Args:
            query: Text to look up documents similar to.
            k: Number of Documents to return. Defaults to 10.
                Note that Typesense returns a minimum of 10 results.
            filter: typesense filter_by expression to filter documents on

        Returns:
            List of Documents most similar to the query.
        """
        docs_and_score = self.similarity_search_with_score(query, k=k, filter=filter)
        return [doc for doc, _ in docs_and_score]

    @classmethod
    def from_client_params(
        cls,
        embedding: Embeddings,
        *,
        host: str = "localhost",
        port: Union[str, int] = "8108",
        protocol: str = "http",
        typesense_api_key: Optional[str] = None,
        connection_timeout_seconds: int = 2,
        **kwargs: Any,
    ) -> Typesense:
        """Initialize Typesense directly from client parameters.

        Example:
            .. code-block:: python

                from langchain_community.embeddings.openai import OpenAIEmbeddings
                from langchain_community.vectorstores import Typesense

                # Pass in typesense_api_key as kwarg or set env var
                # "TYPESENSE_API_KEY".
                vectorstore = Typesense(
                    OpenAIEmbeddings(),
                    host="localhost",
                    port="8108",
                    protocol="http",
                    typesense_collection_name="langchain-memory",
                )
        """
        try:
            from typesense import Client
        except ImportError:
            raise ImportError(
                "Could not import typesense python package. "
                "Please install it with `pip install typesense`."
) node = { "host": host, "port": str(port), "protocol": protocol, } typesense_api_key = typesense_api_key or get_from_env( "typesense_api_key", "TYPESENSE_API_KEY" ) client_config = { "nodes": [node], "api_key": typesense_api_key, "connection_timeout_seconds": connection_timeout_seconds, } return cls(Client(client_config), embedding, **kwargs) @classmethod def from_texts( cls, texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, typesense_client: Optional[Client] = None, typesense_client_params: Optional[dict] = None, typesense_collection_name: Optional[str] = None, text_key: str = "text", **kwargs: Any, ) -> Typesense: """Construct Typesense wrapper from raw text.""" if typesense_client: vectorstore = cls(typesense_client, embedding, **kwargs) elif typesense_client_params: vectorstore = cls.from_client_params( embedding, **typesense_client_params, **kwargs ) else: raise ValueError( "Must specify one of typesense_client or typesense_client_params." ) vectorstore.add_texts(texts, metadatas=metadatas, ids=ids) return vectorstore
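

# --- Usage sketch (illustrative; not part of the library) ---
# A minimal sketch of wiring the store up through `typesense_client_params`,
# which routes through `from_client_params` above. The host, port, and API
# key are placeholders for a running Typesense server; FakeEmbeddings stands
# in for a real embedding model.
def _typesense_usage_sketch() -> None:
    from langchain_community.embeddings import FakeEmbeddings

    store = Typesense.from_texts(
        texts=["typesense does vector search", "and keyword search"],
        embedding=FakeEmbeddings(size=128),
        typesense_client_params={
            "host": "localhost",  # placeholder host
            "port": "8108",  # placeholder port
            "protocol": "http",
            "typesense_api_key": "<API_KEY>",  # placeholder key
        },
        typesense_collection_name="langchain-demo",
    )
    # `filter` takes a Typesense filter_by expression, e.g. "metadata.source:=a".
    print(store.similarity_search("vector search", k=10))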
0
lc_public_repos/langchain/libs/community/langchain_community
lc_public_repos/langchain/libs/community/langchain_community/vectorstores/infinispanvs.py
"""Module providing Infinispan as a VectorStore""" from __future__ import annotations import json import logging import uuid import warnings from typing import Any, Iterable, List, Optional, Tuple, Type, Union, cast from httpx import Response from langchain_core.documents import Document from langchain_core.embeddings import Embeddings from langchain_core.vectorstores import VectorStore logger = logging.getLogger(__name__) class InfinispanVS(VectorStore): """`Infinispan` VectorStore interface. This class exposes the method to present Infinispan as a VectorStore. It relies on the Infinispan class (below) which takes care of the REST interface with the server. Example: ... code-block:: python from langchain_community.vectorstores import InfinispanVS from mymodels import RGBEmbeddings ... vectorDb = InfinispanVS.from_documents(docs, embedding=RGBEmbeddings(), output_fields=["texture", "color"], lambda_key=lambda text,meta: str(meta["_key"]), lambda_content=lambda item: item["color"]) or an empty InfinispanVS instance can be created if preliminary setup is required before populating the store ... code-block:: python from langchain_community.vectorstores import InfinispanVS from mymodels import RGBEmbeddings ... ispnVS = InfinispanVS() # configure Infinispan here # i.e. create cache and schema # then populate the store vectorDb = InfinispanVS.from_documents(docs, embedding=RGBEmbeddings(), output_fields: ["texture", "color"], lambda_key: lambda text,meta: str(meta["_key"]), lambda_content: lambda item: item["color"]) """ def __init__( self, embedding: Optional[Embeddings] = None, ids: Optional[List[str]] = None, **kwargs: Any, ): """ Parameters ---------- cache_name: str Embeddings cache name. Default "vector" entity_name: str Protobuf entity name for the embeddings. Default "vector" text_field: str Protobuf field name for text. Default "text" vector_field: str Protobuf field name for vector. Default "vector" lambda_content: lambda Lambda returning the content part of an item. Default returns text_field lambda_metadata: lambda Lambda returning the metadata part of an item. Default returns items fields excepts text_field, vector_field, _type output_fields: List[str] List of fields to be returned from item, if None return all fields. Default None kwargs: Any Rest of arguments passed to Infinispan. See docs""" self.ispn = Infinispan(**kwargs) self._configuration = kwargs self._cache_name = str(self._configuration.get("cache_name", "vector")) self._entity_name = str(self._configuration.get("entity_name", "vector")) self._embedding = embedding self._textfield = self._configuration.get("textfield", "") if self._textfield == "": self._textfield = self._configuration.get("text_field", "text") else: warnings.warn( "`textfield` is deprecated. Please use `text_field` " "param.", DeprecationWarning, ) self._vectorfield = self._configuration.get("vectorfield", "") if self._vectorfield == "": self._vectorfield = self._configuration.get("vector_field", "vector") else: warnings.warn( "`vectorfield` is deprecated. 
Please use `vector_field` " "param.", DeprecationWarning, ) self._to_content = self._configuration.get( "lambda_content", lambda item: self._default_content(item) ) self._to_metadata = self._configuration.get( "lambda_metadata", lambda item: self._default_metadata(item) ) self._output_fields = self._configuration.get("output_fields") self._ids = ids def _default_metadata(self, item: dict) -> dict: meta = dict(item) meta.pop(self._vectorfield, None) meta.pop(self._textfield, None) meta.pop("_type", None) return meta def _default_content(self, item: dict[str, Any]) -> Any: return item.get(self._textfield) def schema_builder(self, templ: dict, dimension: int) -> str: metadata_proto_tpl = """ /** * @Indexed */ message %s { /** * @Vector(dimension=%d) */ repeated float %s = 1; """ metadata_proto = metadata_proto_tpl % ( self._entity_name, dimension, self._vectorfield, ) idx = 2 for f, v in templ.items(): if isinstance(v, str): metadata_proto += "optional string " + f + " = " + str(idx) + ";\n" elif isinstance(v, int): metadata_proto += "optional int64 " + f + " = " + str(idx) + ";\n" elif isinstance(v, float): metadata_proto += "optional double " + f + " = " + str(idx) + ";\n" elif isinstance(v, bytes): metadata_proto += "optional bytes " + f + " = " + str(idx) + ";\n" elif isinstance(v, bool): metadata_proto += "optional bool " + f + " = " + str(idx) + ";\n" else: raise Exception( "Unable to build proto schema for metadata. " "Unhandled type for field: " + f ) idx += 1 metadata_proto += "}\n" return metadata_proto def schema_create(self, proto: str) -> Response: """Deploy the schema for the vector db Args: proto(str): protobuf schema Returns: An http Response containing the result of the operation """ return self.ispn.schema_post(self._entity_name + ".proto", proto) def schema_delete(self) -> Response: """Delete the schema for the vector db Returns: An http Response containing the result of the operation """ return self.ispn.schema_delete(self._entity_name + ".proto") def cache_create(self, config: str = "") -> Response: """Create the cache for the vector db Args: config(str): configuration of the cache. 
        Returns:
            An http Response containing the result of the operation
        """
        if config == "":
            config = (
                '''
                {
                  "distributed-cache": {
                    "owners": "2",
                    "mode": "SYNC",
                    "statistics": true,
                    "encoding": {
                      "media-type": "application/x-protostream"
                    },
                    "indexing": {
                      "enabled": true,
                      "storage": "filesystem",
                      "startup-mode": "AUTO",
                      "indexing-mode": "AUTO",
                      "indexed-entities": [
                        "'''
                + self._entity_name
                + """"
                      ]
                    }
                  }
                }
                """
            )
        return self.ispn.cache_post(self._cache_name, config)

    def cache_delete(self) -> Response:
        """Delete the cache for the vector db

        Returns:
            An http Response containing the result of the operation
        """
        return self.ispn.cache_delete(self._cache_name)

    def cache_clear(self) -> Response:
        """Clear the cache for the vector db

        Returns:
            An http Response containing the result of the operation
        """
        return self.ispn.cache_clear(self._cache_name)

    def cache_exists(self) -> bool:
        """Checks if the cache exists

        Returns:
            true if exists
        """
        return self.ispn.cache_exists(self._cache_name)

    def cache_index_clear(self) -> Response:
        """Clear the index for the vector db

        Returns:
            An http Response containing the result of the operation
        """
        return self.ispn.index_clear(self._cache_name)

    def cache_index_reindex(self) -> Response:
        """Rebuild the index for the vector db

        Returns:
            An http Response containing the result of the operation
        """
        return self.ispn.index_reindex(self._cache_name)

    def add_texts(
        self,
        texts: Iterable[str],
        metadatas: Optional[List[dict]] = None,
        last_vector: Optional[List[float]] = None,
        **kwargs: Any,
    ) -> List[str]:
        result = []
        texts_l = list(texts)
        if last_vector:
            texts_l.pop()
        embeds = self._embedding.embed_documents(texts_l)  # type: ignore
        if last_vector:
            embeds.append(last_vector)
        if not metadatas:
            metadatas = [{} for _ in texts]
        ids = self._ids or [str(uuid.uuid4()) for _ in texts]
        data_input = list(zip(metadatas, embeds, ids))
        for metadata, embed, key in data_input:
            data = {"_type": self._entity_name, self._vectorfield: embed}
            data.update(metadata)
            data_str = json.dumps(data)
            self.ispn.put(key, data_str, self._cache_name)
            result.append(key)
        return result

    def similarity_search(
        self, query: str, k: int = 4, **kwargs: Any
    ) -> List[Document]:
        """Return docs most similar to query."""
        documents = self.similarity_search_with_score(query=query, k=k)
        return [doc for doc, _ in documents]

    def similarity_search_with_score(
        self, query: str, k: int = 4, **kwargs: Any
    ) -> List[Tuple[Document, float]]:
        """Perform a search on a query string and return results with score.

        Args:
            query (str): The text being searched.
            k (int, optional): The amount of results to return. Defaults to 4.

        Returns:
            List[Tuple[Document, float]]
        """
        embed = self._embedding.embed_query(query)  # type: ignore
        documents = self.similarity_search_with_score_by_vector(embedding=embed, k=k)
        return documents

    def similarity_search_by_vector(
        self, embedding: List[float], k: int = 4, **kwargs: Any
    ) -> List[Document]:
        res = self.similarity_search_with_score_by_vector(embedding, k)
        return [doc for doc, _ in res]

    def similarity_search_with_score_by_vector(
        self, embedding: List[float], k: int = 4
    ) -> List[Tuple[Document, float]]:
        """Return docs most similar to embedding vector.

        Args:
            embedding: Embedding to look up documents similar to.
            k: Number of Documents to return. Defaults to 4.

        Returns:
            List of pair (Documents, score) most similar to the query vector.
        """
        if self._output_fields is None:
            query_str = (
                "select v, score(v) from "
                + self._entity_name
                + " v where v."
                + self._vectorfield
                + " <-> "
                + json.dumps(embedding)
                + "~"
                + str(k)
            )
        else:
            query_proj = "select "
            for field in self._output_fields[:-1]:
                query_proj = query_proj + "v." + field + ","
            query_proj = query_proj + "v." + self._output_fields[-1]
            query_str = (
                query_proj
                + ", score(v) from "
                + self._entity_name
                + " v where v."
                + self._vectorfield
                + " <-> "
                + json.dumps(embedding)
                + "~"
                + str(k)
            )
        query_res = self.ispn.req_query(query_str, self._cache_name)
        result = json.loads(query_res.text)
        return self._query_result_to_docs(result)

    def _query_result_to_docs(
        self, result: dict[str, Any]
    ) -> List[Tuple[Document, float]]:
        documents = []
        for row in result["hits"]:
            hit = row["hit"] or {}
            if self._output_fields is None:
                entity = hit["*"]
            else:
                entity = {key: hit.get(key) for key in self._output_fields}
            doc = Document(
                page_content=self._to_content(entity),
                metadata=self._to_metadata(entity),
            )
            documents.append((doc, hit["score()"]))
        return documents

    def configure(self, metadata: dict, dimension: int) -> None:
        schema = self.schema_builder(metadata, dimension)
        output = self.schema_create(schema)
        assert (
            output.status_code == self.ispn.Codes.OK
        ), "Unable to create schema. Already exists? " "Consider using clear_old=True"
        assert json.loads(output.text)["error"] is None
        if not self.cache_exists():
            output = self.cache_create()
            assert (
                output.status_code == self.ispn.Codes.OK
            ), "Unable to create cache. Already exists? " "Consider using clear_old=True"
        # Ensure index is clean
        self.cache_index_clear()

    def config_clear(self) -> None:
        self.schema_delete()
        self.cache_delete()

    @classmethod
    def from_texts(
        cls: Type[InfinispanVS],
        texts: List[str],
        embedding: Embeddings,
        metadatas: Optional[List[dict]] = None,
        ids: Optional[List[str]] = None,
        clear_old: Optional[bool] = True,
        auto_config: Optional[bool] = True,
        **kwargs: Any,
    ) -> InfinispanVS:
        """Return VectorStore initialized from texts and embeddings.

        In addition to parameters described by the super method, this
        implementation provides other configuration params if different
        configuration from default is needed.

        Parameters
        ----------
        ids : List[str]
            Additional list of keys associated to the embedding. If not
            provided UUIDs will be generated
        clear_old : bool
            Whether old data must be deleted. Default True
        auto_config: bool
            Whether to do a complete server setup (caches,
            protobuf definition...). Default True
        kwargs: Any
            Rest of arguments passed to InfinispanVS. See docs"""
        infinispanvs = cls(embedding=embedding, ids=ids, **kwargs)
        if auto_config and len(metadatas or []) > 0:
            if clear_old:
                infinispanvs.config_clear()
            vec = embedding.embed_query(texts[len(texts) - 1])
            metadatas = cast(List[dict], metadatas)
            infinispanvs.configure(metadatas[0], len(vec))
        else:
            if clear_old:
                infinispanvs.cache_clear()
            vec = embedding.embed_query(texts[len(texts) - 1])
        if texts:
            infinispanvs.add_texts(texts, metadatas, last_vector=vec)
        return infinispanvs


REST_TIMEOUT = 10


class Infinispan:
    """Helper class for `Infinispan` REST interface.

    This class exposes the Infinispan operations needed to
    create and set up a vector db.

    You need a running Infinispan (15+) server without authentication.
You can easily start one, see: https://github.com/rigazilla/infinispan-vector#run-infinispan """ def __init__( self, schema: str = "http", user: str = "", password: str = "", hosts: List[str] = ["127.0.0.1:11222"], cache_url: str = "/rest/v2/caches", schema_url: str = "/rest/v2/schemas", use_post_for_query: bool = True, http2: bool = True, verify: bool = True, **kwargs: Any, ): """ Parameters ---------- schema: str Schema for HTTP request: "http" or "https". Default "http" user, password: str User and password if auth is required. Default None hosts: List[str] List of server addresses. Default ["127.0.0.1:11222"] cache_url: str URL endpoint for cache API. Default "/rest/v2/caches" schema_url: str URL endpoint for schema API. Default "/rest/v2/schemas" use_post_for_query: bool Whether POST method should be used for query. Default True http2: bool Whether HTTP/2 protocol should be used. `pip install "httpx[http2]"` is needed for HTTP/2. Default True verify: bool Whether TLS certificate must be verified. Default True """ try: import httpx except ImportError: raise ImportError( "Could not import httpx python package. " "Please install it with `pip install httpx`" 'or `pip install "httpx[http2]"` if you need HTTP/2.' ) self.Codes = httpx.codes self._configuration = kwargs self._schema = schema self._user = user self._password = password self._host = hosts[0] self._default_node = self._schema + "://" + self._host self._cache_url = cache_url self._schema_url = schema_url self._use_post_for_query = use_post_for_query self._http2 = http2 if self._user and self._password: if self._schema == "http": auth: Union[Tuple[str, str], httpx.DigestAuth] = httpx.DigestAuth( username=self._user, password=self._password ) else: auth = (self._user, self._password) self._h2c = httpx.Client( http2=self._http2, http1=not self._http2, auth=auth, verify=verify, ) else: self._h2c = httpx.Client( http2=self._http2, http1=not self._http2, verify=verify, ) def req_query(self, query: str, cache_name: str, local: bool = False) -> Response: """Request a query Args: query(str): query requested cache_name(str): name of the target cache local(boolean): whether the query is local to clustered Returns: An http Response containing the result set or errors """ if self._use_post_for_query: return self._query_post(query, cache_name, local) return self._query_get(query, cache_name, local) def _query_post( self, query_str: str, cache_name: str, local: bool = False ) -> Response: api_url = ( self._default_node + self._cache_url + "/" + cache_name + "?action=search&local=" + str(local) ) data = {"query": query_str} data_json = json.dumps(data) response = self._h2c.post( api_url, content=data_json, headers={"Content-Type": "application/json"}, timeout=REST_TIMEOUT, ) return response def _query_get( self, query_str: str, cache_name: str, local: bool = False ) -> Response: api_url = ( self._default_node + self._cache_url + "/" + cache_name + "?action=search&query=" + query_str + "&local=" + str(local) ) response = self._h2c.get(api_url, timeout=REST_TIMEOUT) return response def post(self, key: str, data: str, cache_name: str) -> Response: """Post an entry Args: key(str): key of the entry data(str): content of the entry in json format cache_name(str): target cache Returns: An http Response containing the result of the operation """ api_url = self._default_node + self._cache_url + "/" + cache_name + "/" + key response = self._h2c.post( api_url, content=data, headers={"Content-Type": "application/json"}, timeout=REST_TIMEOUT, ) return 
response def put(self, key: str, data: str, cache_name: str) -> Response: """Put an entry Args: key(str): key of the entry data(str): content of the entry in json format cache_name(str): target cache Returns: An http Response containing the result of the operation """ api_url = self._default_node + self._cache_url + "/" + cache_name + "/" + key response = self._h2c.put( api_url, content=data, headers={"Content-Type": "application/json"}, timeout=REST_TIMEOUT, ) return response def get(self, key: str, cache_name: str) -> Response: """Get an entry Args: key(str): key of the entry cache_name(str): target cache Returns: An http Response containing the entry or errors """ api_url = self._default_node + self._cache_url + "/" + cache_name + "/" + key response = self._h2c.get( api_url, headers={"Content-Type": "application/json"}, timeout=REST_TIMEOUT ) return response def schema_post(self, name: str, proto: str) -> Response: """Deploy a schema Args: name(str): name of the schema. Will be used as a key proto(str): protobuf schema Returns: An http Response containing the result of the operation """ api_url = self._default_node + self._schema_url + "/" + name response = self._h2c.post(api_url, content=proto, timeout=REST_TIMEOUT) return response def cache_post(self, name: str, config: str) -> Response: """Create a cache Args: name(str): name of the cache. config(str): configuration of the cache. Returns: An http Response containing the result of the operation """ api_url = self._default_node + self._cache_url + "/" + name response = self._h2c.post( api_url, content=config, headers={"Content-Type": "application/json"}, timeout=REST_TIMEOUT, ) return response def schema_delete(self, name: str) -> Response: """Delete a schema Args: name(str): name of the schema. Returns: An http Response containing the result of the operation """ api_url = self._default_node + self._schema_url + "/" + name response = self._h2c.delete(api_url, timeout=REST_TIMEOUT) return response def cache_delete(self, name: str) -> Response: """Delete a cache Args: name(str): name of the cache. Returns: An http Response containing the result of the operation """ api_url = self._default_node + self._cache_url + "/" + name response = self._h2c.delete(api_url, timeout=REST_TIMEOUT) return response def cache_clear(self, cache_name: str) -> Response: """Clear a cache Args: cache_name(str): name of the cache. Returns: An http Response containing the result of the operation """ api_url = ( self._default_node + self._cache_url + "/" + cache_name + "?action=clear" ) response = self._h2c.post(api_url, timeout=REST_TIMEOUT) return response def cache_exists(self, cache_name: str) -> bool: """Check if a cache exists Args: cache_name(str): name of the cache. Returns: True if cache exists """ api_url = ( self._default_node + self._cache_url + "/" + cache_name + "?action=clear" ) return self.resource_exists(api_url) def resource_exists(self, api_url: str) -> bool: """Check if a resource exists Args: api_url(str): url of the resource. Returns: true if resource exists """ response = self._h2c.head(api_url, timeout=REST_TIMEOUT) return response.status_code == self.Codes.OK def index_clear(self, cache_name: str) -> Response: """Clear an index on a cache Args: cache_name(str): name of the cache. 
Returns: An http Response containing the result of the operation """ api_url = ( self._default_node + self._cache_url + "/" + cache_name + "/search/indexes?action=clear" ) return self._h2c.post(api_url, timeout=REST_TIMEOUT) def index_reindex(self, cache_name: str) -> Response: """Rebuild index on a cache Args: cache_name(str): name of the cache. Returns: An http Response containing the result of the operation """ api_url = ( self._default_node + self._cache_url + "/" + cache_name + "/search/indexes?action=reindex" ) return self._h2c.post(api_url, timeout=REST_TIMEOUT)
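A minimal usage sketch for the REST wrapper defined above, assuming a local Infinispan server at 127.0.0.1:11222 with authentication disabled. The class is referred to as `Infinispan` here (its name in `infinispanvs.py`; adjust if it differs in your copy). The cache name, protobuf schema, and cache configuration JSON are hypothetical placeholders, not values mandated by the library.

import json

# Hypothetical placeholders throughout; adjust to your server setup.
client = Infinispan(schema="http", hosts=["127.0.0.1:11222"], http2=False)

# Deploy a tiny protobuf schema, then make sure the cache exists.
proto = """
syntax = "proto2";
message demo_item {
  optional string text = 1;
}
"""
print(client.schema_post("demo_item.proto", proto).status_code)

if not client.cache_exists("demo_cache"):
    # A guess at a minimal protostream-encoded cache config; consult the
    # Infinispan docs for the exact JSON your server version expects.
    config = json.dumps(
        {"distributed-cache": {"encoding": {"media-type": "application/x-protostream"}}}
    )
    print(client.cache_post("demo_cache", config).status_code)

# Store an entry and read it back.
client.put("key-1", json.dumps({"_type": "demo_item", "text": "hello"}), "demo_cache")
print(client.get("key-1", "demo_cache").text)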
0
lc_public_repos/langchain/libs/community/langchain_community
lc_public_repos/langchain/libs/community/langchain_community/vectorstores/ecloud_vector_search.py
import logging import uuid from typing import ( TYPE_CHECKING, Any, Callable, Dict, Iterable, List, Optional, Tuple, Union, ) from langchain_core.documents import Document from langchain_core.embeddings import Embeddings from langchain_core.vectorstores import VectorStore if TYPE_CHECKING: from elasticsearch import Elasticsearch logger = logging.getLogger(__name__) class EcloudESVectorStore(VectorStore): """`ecloud Elasticsearch` vector store. Example: .. code-block:: python from langchain.vectorstores import EcloudESVectorStore from langchain.embeddings.openai import OpenAIEmbeddings embeddings = OpenAIEmbeddings() vectorstore = EcloudESVectorStore( embedding=embeddings, index_name="langchain-demo", es_url="http://localhost:9200" ) Args: index_name: Name of the Elasticsearch index to create. es_url: URL of the ecloud Elasticsearch instance to connect to. user: Username to use when connecting to Elasticsearch. password: Password to use when connecting to Elasticsearch. """ def __init__( self, index_name: str, es_url: str, user: Optional[str] = None, password: Optional[str] = None, embedding: Optional[Embeddings] = None, **kwargs: Optional[dict], ) -> None: self.embedding = embedding self.index_name = index_name self.text_field = kwargs.get("text_field", "text") self.vector_field = kwargs.get("vector_field", "vector") self.vector_type = kwargs.get("vector_type", "knn_dense_float_vector") self.vector_params = kwargs.get("vector_params") or {} self.model = self.vector_params.get("model", "") self.index_settings = kwargs.get("index_settings") or {} key_list = [ "text_field", "vector_field", "vector_type", "vector_params", "index_settings", ] for key in key_list: kwargs.pop(key, None) if es_url is not None: self.client = EcloudESVectorStore.es_client( es_url=es_url, username=user, password=password, **kwargs ) else: raise ValueError("Please specify an ES connection URL.") @property def embeddings(self) -> Optional[Embeddings]: return self.embedding @staticmethod def es_client( *, es_url: Optional[str] = None, username: Optional[str] = None, password: Optional[str] = None, **kwargs: Optional[dict], ) -> "Elasticsearch": try: import elasticsearch except ImportError: raise ImportError( "Could not import elasticsearch python package. " "Please install it with `pip install elasticsearch`." ) connection_params: Dict[str, Any] = {"hosts": [es_url]} if username and password: connection_params["http_auth"] = (username, password) connection_params.update(kwargs) es_client = elasticsearch.Elasticsearch(**connection_params) try: es_client.info() except Exception as e: logger.error(f"Error connecting to Elasticsearch: {e}") raise e return es_client def _create_index_if_not_exists(self, dims_length: Optional[int] = None) -> None: """Create the index if it doesn't already exist. Args: dims_length: Length of the embedding vectors. """ if self.client.indices.exists(index=self.index_name): logger.info(f"Index {self.index_name} already exists. Skipping creation.") else: if dims_length is None: raise ValueError( "Cannot create index without specifying dims_length " + "when the index doesn't already exist. 
" ) indexMapping = self._index_mapping(dims_length=dims_length) logger.debug( f"Creating index {self.index_name} with mappings {indexMapping}" ) self.client.indices.create( index=self.index_name, body={ "settings": {"index.knn": True, **self.index_settings}, "mappings": {"properties": indexMapping}, }, ) def _index_mapping(self, dims_length: Union[int, None]) -> Dict: """ Executes when the index is created. Args: dims_length: Numeric length of the embedding vectors, or None if not using vector-based query. index_params: The extra pamameters for creating index. Returns: Dict: The Elasticsearch settings and mappings for the strategy. """ model = self.vector_params.get("model", "") if "lsh" == model: mapping: Dict[Any, Any] = { self.vector_field: { "type": self.vector_type, "knn": { "dims": dims_length, "model": "lsh", "similarity": self.vector_params.get("similarity", "cosine"), "L": self.vector_params.get("L", 99), "k": self.vector_params.get("k", 1), }, } } if mapping[self.vector_field]["knn"]["similarity"] == "l2": mapping[self.vector_field]["knn"]["w"] = self.vector_params.get("w", 3) return mapping elif "permutation_lsh" == model: return { self.vector_field: { "type": self.vector_type, "knn": { "dims": dims_length, "model": "permutation_lsh", "k": self.vector_params.get("k", 10), "similarity": self.vector_params.get("similarity", "cosine"), "repeating": self.vector_params.get("repeating", True), }, } } else: return { self.vector_field: { "type": self.vector_type, "knn": {"dims": dims_length}, } } def delete( self, ids: Optional[List[str]] = None, **kwargs: Any, ) -> Optional[bool]: """Delete documents from the index. Args: ids: List of ids of documents to delete """ try: from elasticsearch.helpers import BulkIndexError, bulk except ImportError: raise ImportError( "Could not import elasticsearch python package. " "Please install it with `pip install elasticsearch`." 
) body = [] if ids is None: raise ValueError("ids must be provided.") for _id in ids: body.append({"_op_type": "delete", "_index": self.index_name, "_id": _id}) if len(body) > 0: try: bulk( self.client, body, refresh=kwargs.get("refresh_indices", True), ignore_status=404, ) logger.debug(f"Deleted {len(body)} texts from index") return True except BulkIndexError as e: logger.error(f"Error deleting texts: {e}") raise e else: logger.info("No documents to delete") return False def _query_body( self, query_vector: Union[List[float], None], filter: Optional[dict] = None, search_params: Dict = {}, ) -> Dict: query_vector_body = { "field": search_params.get("vector_field", self.vector_field) } if self.vector_type == "knn_dense_float_vector": query_vector_body["vec"] = {"values": query_vector} specific_params = self.get_dense_specific_model_similarity_params( search_params ) query_vector_body.update(specific_params) else: query_vector_body["vec"] = { "true_indices": query_vector, "total_indices": len(query_vector) if query_vector is not None else 0, } specific_params = self.get_sparse_specific_model_similarity_params( search_params ) query_vector_body.update(specific_params) query_vector_body = {"knn_nearest_neighbors": query_vector_body} if filter is not None and len(filter) != 0: query_vector_body = { "function_score": {"query": filter, "functions": [query_vector_body]} } return { "size": search_params.get("size", 4), "query": query_vector_body, } @staticmethod def get_dense_specific_model_similarity_params( search_params: Dict[str, Any], ) -> Dict: model = search_params.get("model", "exact") similarity = search_params.get("similarity", "cosine") specific_params = {"model": model, "similarity": similarity} if not model == "exact": if model not in ("lsh", "permutation_lsh"): raise ValueError( f"vector type knn_dense_float_vector doesn't support model {model}" ) if similarity not in ("cosine", "l2"): raise ValueError(f"model {model} doesn't support similarity {similarity}") specific_params["candidates"] = search_params.get( "candidates", search_params.get("size", 4) ) if model == "lsh" and similarity == "l2": specific_params["probes"] = search_params.get("probes", 0) else: if similarity not in ("cosine", "l2"): raise ValueError(f"model exact doesn't support similarity {similarity}") return specific_params @staticmethod def get_sparse_specific_model_similarity_params( search_params: Dict[str, Any], ) -> Dict: model = search_params.get("model", "exact") similarity = search_params.get("similarity", "hamming") specific_params = {"model": model, "similarity": similarity} if not model == "exact": if model not in ("lsh",): raise ValueError( f"vector type knn_sparse_bool_vector doesn't support model {model}" ) if similarity not in ("hamming", "jaccard"): raise ValueError(f"model {model} doesn't support similarity {similarity}") specific_params["candidates"] = search_params.get( "candidates", search_params.get("size", 4) ) else: if similarity not in ("hamming", "jaccard"): raise ValueError(f"model exact doesn't support similarity {similarity}") return specific_params def _search( self, query: Optional[str] = None, query_vector: Union[List[float], None] = None, filter: Optional[dict] = None, custom_query: Optional[Callable[[Dict, Union[str, None]], Dict]] = None, search_params: Dict = {}, ) -> List[Tuple[Document, float]]: """Return searched documents result from ecloud ES Args: query: Text to look up documents similar to. query_vector: Embedding to look up documents similar to. 
filter: Array of ecloud ElasticSearch filter clauses to apply to the query. custom_query: Function to modify the query body before it is sent to ES. Returns: List of Documents most similar to the query and score for each """ if self.embedding and query is not None: query_vector = self.embedding.embed_query(query) query_body = self._query_body( query_vector=query_vector, filter=filter, search_params=search_params ) if custom_query is not None: query_body = custom_query(query_body, query) logger.debug(f"Calling custom_query, Query body now: {query_body}") logger.debug(f"Query body: {query_body}") # Perform the kNN search on the ES index and return the results. response = self.client.search(index=self.index_name, body=query_body) logger.debug(f"response={response}") hits = [hit for hit in response["hits"]["hits"]] docs_and_scores = [ ( Document( page_content=hit["_source"][ search_params.get("text_field", self.text_field) ], metadata=hit["_source"]["metadata"], ), hit["_score"], ) for hit in hits ] return docs_and_scores def similarity_search( self, query: str, k: int = 4, filter: Optional[dict] = None, **kwargs: Any, ) -> List[Document]: """Return documents most similar to query. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. filter: Array of Elasticsearch filter clauses to apply to the query. Returns: List of Documents most similar to the query, in descending order of similarity. """ results = self.similarity_search_with_score( query=query, k=k, filter=filter, **kwargs ) return [doc for doc, _ in results] def similarity_search_with_score( self, query: str, k: int, filter: Optional[dict] = None, **kwargs: Any ) -> List[Tuple[Document, float]]: """Return documents most similar to query, along with scores. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. filter: Array of Elasticsearch filter clauses to apply to the query. Returns: List of Documents most similar to the query and score for each """ search_params: Dict[str, Any] = kwargs.get("search_params") or {} if len(search_params) == 0: kwargs = {"search_params": {"size": k}} elif search_params.get("size") is None: search_params["size"] = k kwargs["search_params"] = search_params return self._search(query=query, filter=filter, **kwargs) @classmethod def from_documents( cls, documents: List[Document], embedding: Optional[Embeddings] = None, **kwargs: Any, ) -> "EcloudESVectorStore": """Construct EcloudESVectorStore wrapper from documents. Args: documents: List of documents to add to the Elasticsearch index. embedding: Embedding function to use to embed the texts. Do not provide if using a strategy that doesn't require inference. kwargs: index creation keyword arguments """ vectorStore = EcloudESVectorStore._es_vector_store( embedding=embedding, **kwargs ) # Encode the provided texts and add them to the newly created index. vectorStore.add_documents(documents) return vectorStore @classmethod def from_texts( cls, texts: List[str], embedding: Optional[Embeddings] = None, metadatas: Optional[List[Dict[str, Any]]] = None, **kwargs: Any, ) -> "EcloudESVectorStore": """Construct EcloudESVectorStore wrapper from raw documents. Args: texts: List of texts to add to the Elasticsearch index. embedding: Embedding function to use to embed the texts. metadatas: Optional list of metadatas associated with the texts. index_name: Name of the Elasticsearch index to create. 
kwargs: index creation keyword arguments """ vectorStore = cls._es_vector_store(embedding=embedding, **kwargs) # Encode the provided texts and add them to the newly created index. vectorStore.add_texts(texts, metadatas=metadatas, **kwargs) return vectorStore def add_texts( self, texts: Iterable[str], metadatas: Optional[List[Dict[Any, Any]]] = None, **kwargs: Any, ) -> List[str]: """Run more texts through the embeddings and add to the vectorstore. Args: texts: Iterable of strings to add to the vectorstore. metadatas: Optional list of metadatas associated with the texts. Returns: List of ids from adding the texts into the vectorstore. """ try: from elasticsearch.helpers import BulkIndexError, bulk except ImportError: raise ImportError( "Could not import elasticsearch python package. " "Please install it with `pip install elasticsearch`." ) embeddings = [] create_index_if_not_exists = kwargs.get("create_index_if_not_exists", True) ids = kwargs.get("ids", [str(uuid.uuid4()) for _ in texts]) refresh_indices = kwargs.get("refresh_indices", False) requests = [] if self.embedding is not None: embeddings = self.embedding.embed_documents(list(texts)) dims_length = len(embeddings[0]) if create_index_if_not_exists: self._create_index_if_not_exists(dims_length=dims_length) for i, (text, vector) in enumerate(zip(texts, embeddings)): metadata = metadatas[i] if metadatas else {} doc = { "_op_type": "index", "_index": self.index_name, self.text_field: text, "metadata": metadata, "_id": ids[i], } if self.vector_type == "knn_dense_float_vector": doc[self.vector_field] = vector elif self.vector_type == "knn_sparse_bool_vector": doc[self.vector_field] = { "true_indices": vector, "total_indices": len(vector), } requests.append(doc) else: if create_index_if_not_exists: self._create_index_if_not_exists() for i, text in enumerate(texts): metadata = metadatas[i] if metadatas else {} requests.append( { "_op_type": "index", "_index": self.index_name, self.text_field: text, "metadata": metadata, "_id": ids[i], } ) if len(requests) > 0: try: success, failed = bulk( self.client, requests, stats_only=True, refresh=refresh_indices ) logger.debug( f"Added {success} and failed to add {failed} texts to index" ) logger.debug(f"added texts {ids} to index") if refresh_indices: self.client.indices.refresh(index=self.index_name) return ids except BulkIndexError as e: logger.error(f"Error adding texts: {e}") firstError = e.errors[0].get("index", {}).get("error", {}) logger.error(f"First error reason: {firstError.get('reason')}") raise e else: logger.debug("No texts to add to index") return [] @staticmethod def _es_vector_store( embedding: Optional[Embeddings] = None, **kwargs: Any ) -> "EcloudESVectorStore": index_name = kwargs.get("index_name") if index_name is None: raise ValueError("Please provide an index_name.") es_url = kwargs.get("es_url") if es_url is None: raise ValueError("Please provide a valid ES connection URL.") return EcloudESVectorStore(embedding=embedding, **kwargs)
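A hedged usage sketch for EcloudESVectorStore, assuming a reachable Elasticsearch endpoint; the URL and index name are placeholders, and `FakeEmbeddings` (a test helper from langchain_community) stands in for a real embedding model.

from langchain_community.embeddings.fake import FakeEmbeddings

# Placeholder endpoint and index name; any Embeddings implementation works here.
store = EcloudESVectorStore.from_texts(
    texts=["hello world", "goodbye world"],
    embedding=FakeEmbeddings(size=32),
    index_name="langchain-demo",
    es_url="http://localhost:9200",
)
docs = store.similarity_search("hello", k=1)
print(docs[0].page_content)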
0
lc_public_repos/langchain/libs/community/langchain_community
lc_public_repos/langchain/libs/community/langchain_community/vectorstores/clarifai.py
from __future__ import annotations import logging import os import traceback import uuid from concurrent.futures import ThreadPoolExecutor from typing import Any, Iterable, List, Optional, Tuple import requests from langchain_core.documents import Document from langchain_core.embeddings import Embeddings from langchain_core.vectorstores import VectorStore logger = logging.getLogger(__name__) class Clarifai(VectorStore): """`Clarifai AI` vector store. To use, you should have the ``clarifai`` python SDK package installed. Example: .. code-block:: python from langchain_community.vectorstores import Clarifai clarifai_vector_db = Clarifai( user_id=USER_ID, app_id=APP_ID, number_of_docs=NUMBER_OF_DOCS, ) """ def __init__( self, user_id: Optional[str] = None, app_id: Optional[str] = None, number_of_docs: Optional[int] = 4, pat: Optional[str] = None, token: Optional[str] = None, api_base: Optional[str] = "https://api.clarifai.com", ) -> None: """Initialize with Clarifai client. Args: user_id (Optional[str], optional): User ID. Defaults to None. app_id (Optional[str], optional): App ID. Defaults to None. pat (Optional[str], optional): Personal access token. Defaults to None. token (Optional[str], optional): Session token. Defaults to None. number_of_docs (Optional[int], optional): Number of documents to return during vector search. Defaults to 4. api_base (Optional[str], optional): API base. Defaults to "https://api.clarifai.com". Raises: ValueError: If user ID, app ID or personal access token is not provided. """ _user_id = user_id or os.environ.get("CLARIFAI_USER_ID") _app_id = app_id or os.environ.get("CLARIFAI_APP_ID") if _user_id is None or _app_id is None: raise ValueError( "Could not find CLARIFAI_USER_ID " "or CLARIFAI_APP_ID in your environment. " "Please set these env variables to a valid user ID and app ID." ) self._number_of_docs = number_of_docs try: from clarifai.client.search import Search except ImportError as e: raise ImportError( "Could not import clarifai python package. " "Please install it with `pip install clarifai`." ) from e self._auth = Search( user_id=_user_id, app_id=_app_id, top_k=number_of_docs, pat=pat, token=token, base_url=api_base, ).auth_helper def add_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, **kwargs: Any, ) -> List[str]: """Add texts to the Clarifai vectorstore. This pushes the texts to a Clarifai application. The application uses a base workflow that creates and stores an embedding for each text. Make sure you are using a base workflow that is compatible with text (such as Language Understanding). Args: texts (Iterable[str]): Texts to add to the vectorstore. metadatas (Optional[List[dict]], optional): Optional list of metadatas. ids (Optional[List[str]], optional): Optional list of IDs. """ try: from clarifai.client.input import Inputs from google.protobuf.struct_pb2 import Struct except ImportError as e: raise ImportError( "Could not import clarifai python package. " "Please install it with `pip install clarifai`." ) from e ltexts = list(texts) length = len(ltexts) assert length > 0, "No texts provided to add to the vectorstore." if metadatas is not None: assert length == len( metadatas ), "Number of texts and metadatas should be the same." if ids is not None: assert len(ltexts) == len( ids ), "Number of text inputs and input ids should be the same." 
input_obj = Inputs.from_auth_helper(auth=self._auth) batch_size = 32 input_job_ids = [] for idx in range(0, length, batch_size): try: batch_texts = ltexts[idx : idx + batch_size] batch_metadatas = ( metadatas[idx : idx + batch_size] if metadatas else None ) if ids is None: batch_ids = [uuid.uuid4().hex for _ in range(len(batch_texts))] else: batch_ids = ids[idx : idx + batch_size] if batch_metadatas is not None: meta_list = [] for meta in batch_metadatas: meta_struct = Struct() meta_struct.update(meta) meta_list.append(meta_struct) input_batch = [ input_obj.get_text_input( input_id=batch_ids[i], raw_text=text, metadata=meta_list[i] if batch_metadatas else None, ) for i, text in enumerate(batch_texts) ] result_id = input_obj.upload_inputs(inputs=input_batch) input_job_ids.extend(result_id) logger.debug("Input posted successfully.") except Exception as error: logger.warning(f"Post inputs failed: {error}") traceback.print_exc() return input_job_ids def similarity_search_with_score( self, query: str, k: Optional[int] = None, filters: Optional[dict] = None, **kwargs: Any, ) -> List[Tuple[Document, float]]: """Run similarity search with score using Clarifai. Args: query (str): Query text to search for. k (Optional[int]): Number of results to return. If not set, it'll take _number_of_docs. Defaults to None. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. Returns: List[Document]: List of documents most similar to the query text. """ try: from clarifai.client.search import Search from clarifai_grpc.grpc.api import resources_pb2 from google.protobuf import json_format # type: ignore except ImportError as e: raise ImportError( "Could not import clarifai python package. " "Please install it with `pip install clarifai`." ) from e # Get number of docs to return top_k = k or self._number_of_docs search_obj = Search.from_auth_helper(auth=self._auth, top_k=top_k) rank = [{"text_raw": query}] # Add filter by metadata if provided. if filters is not None: search_metadata = {"metadata": filters} search_response = search_obj.query(ranks=rank, filters=[search_metadata]) else: search_response = search_obj.query(ranks=rank) # Retrieve hits hits = [hit for data in search_response for hit in data.hits] executor = ThreadPoolExecutor(max_workers=10) def hit_to_document(hit: resources_pb2.Hit) -> Tuple[Document, float]: metadata = json_format.MessageToDict(hit.input.data.metadata) h = dict(self._auth.metadata) request = requests.get(hit.input.data.text.url, headers=h) # override encoding by real educated guess as provided by chardet request.encoding = request.apparent_encoding requested_text = request.text logger.debug( f"\tScore {hit.score:.2f} for annotation: {hit.annotation.id}\ off input: {hit.input.id}, text: {requested_text[:125]}" ) return (Document(page_content=requested_text, metadata=metadata), hit.score) # Iterate over hits and retrieve metadata and text futures = [executor.submit(hit_to_document, hit) for hit in hits] docs_and_scores = [future.result() for future in futures] return docs_and_scores def similarity_search( self, query: str, k: Optional[int] = None, **kwargs: Any, ) -> List[Document]: """Run similarity search using Clarifai. Args: query: Text to look up documents similar to. k: Number of Documents to return. If not set, it'll take _number_of_docs. Defaults to None. 
Returns: List of Documents most similar to the query. """ docs_and_scores = self.similarity_search_with_score(query, k=k, **kwargs) return [doc for doc, _ in docs_and_scores] @classmethod def from_texts( cls, texts: List[str], embedding: Optional[Embeddings] = None, metadatas: Optional[List[dict]] = None, user_id: Optional[str] = None, app_id: Optional[str] = None, number_of_docs: Optional[int] = None, pat: Optional[str] = None, token: Optional[str] = None, **kwargs: Any, ) -> Clarifai: """Create a Clarifai vectorstore from a list of texts. Args: user_id (str): User ID. app_id (str): App ID. texts (List[str]): List of texts to add. number_of_docs (Optional[int]): Number of documents to return during vector search. Defaults to None. pat (Optional[str], optional): Personal access token. Defaults to None. token (Optional[str], optional): Session token. Defaults to None. metadatas (Optional[List[dict]]): Optional list of metadatas. Defaults to None. kwargs: Additional keyword arguments to be passed to the Search. Returns: Clarifai: Clarifai vectorstore. """ clarifai_vector_db = cls( user_id=user_id, app_id=app_id, number_of_docs=number_of_docs, pat=pat, token=token, **kwargs, ) clarifai_vector_db.add_texts(texts=texts, metadatas=metadatas) return clarifai_vector_db @classmethod def from_documents( cls, documents: List[Document], embedding: Optional[Embeddings] = None, user_id: Optional[str] = None, app_id: Optional[str] = None, number_of_docs: Optional[int] = None, pat: Optional[str] = None, token: Optional[str] = None, **kwargs: Any, ) -> Clarifai: """Create a Clarifai vectorstore from a list of documents. Args: user_id (str): User ID. app_id (str): App ID. documents (List[Document]): List of documents to add. number_of_docs (Optional[int]): Number of documents to return during vector search. Defaults to None. pat (Optional[str], optional): Personal access token. Defaults to None. token (Optional[str], optional): Session token. Defaults to None. kwargs: Additional keyword arguments to be passed to the Search. Returns: Clarifai: Clarifai vectorstore. """ texts = [doc.page_content for doc in documents] metadatas = [doc.metadata for doc in documents] return cls.from_texts( user_id=user_id, app_id=app_id, texts=texts, number_of_docs=number_of_docs, pat=pat, metadatas=metadatas, token=token, **kwargs, )
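A hedged usage sketch for the Clarifai store above; USER_ID, APP_ID, and PAT are placeholders for real Clarifai credentials, and embeddings are computed server-side by the app's base workflow rather than locally.

# Placeholder credentials; requires a Clarifai app with a text-compatible base workflow.
store = Clarifai.from_texts(
    texts=["I really enjoy spending time with you"],
    user_id="USER_ID",
    app_id="APP_ID",
    pat="PAT",
    number_of_docs=2,
)
for doc, score in store.similarity_search_with_score("I had a good time"):
    print(score, doc.page_content[:60])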
0
lc_public_repos/langchain/libs/community/langchain_community
lc_public_repos/langchain/libs/community/langchain_community/vectorstores/pgvecto_rs.py
from __future__ import annotations import uuid from typing import Any, Dict, Iterable, List, Literal, Optional, Tuple, Union from langchain_core.documents import Document from langchain_core.embeddings import Embeddings from langchain_core.vectorstores import VectorStore class PGVecto_rs(VectorStore): """VectorStore backed by pgvecto_rs.""" _store = None _embedding: Embeddings def __init__( self, embedding: Embeddings, dimension: int, db_url: str, collection_name: str, new_table: bool = False, ) -> None: """Initialize a PGVecto_rs vectorstore. Args: embedding: Embeddings to use. dimension: Dimension of the embeddings. db_url: Database URL. collection_name: Name of the collection. new_table: Whether to create a new table or connect to an existing one. If true, the table will be dropped if exists, then recreated. Defaults to False. """ try: from pgvecto_rs.sdk import PGVectoRs except ImportError as e: raise ImportError( "Unable to import pgvector_rs.sdk , please install with " '`pip install "pgvecto_rs[sdk]"`.' ) from e self._store = PGVectoRs( db_url=db_url, collection_name=collection_name, dimension=dimension, recreate=new_table, ) self._embedding = embedding # ================ Create interface ================= @classmethod def from_texts( cls, texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, db_url: str = "", collection_name: str = str(uuid.uuid4().hex), **kwargs: Any, ) -> PGVecto_rs: """Return VectorStore initialized from texts and optional metadatas.""" sample_embedding = embedding.embed_query("Hello pgvecto_rs!") dimension = len(sample_embedding) if db_url is None: raise ValueError("db_url must be provided") _self: PGVecto_rs = cls( embedding=embedding, dimension=dimension, db_url=db_url, collection_name=collection_name, ) _self.add_texts(texts, metadatas, **kwargs) return _self @classmethod def from_documents( cls, documents: List[Document], embedding: Embeddings, db_url: str = "", collection_name: str = str(uuid.uuid4().hex), **kwargs: Any, ) -> PGVecto_rs: """Return VectorStore initialized from documents.""" texts = [document.page_content for document in documents] metadatas = [document.metadata for document in documents] return cls.from_texts( texts, embedding, metadatas, db_url, collection_name, **kwargs ) @classmethod def from_collection_name( cls, embedding: Embeddings, db_url: str, collection_name: str, ) -> PGVecto_rs: """Create new empty vectorstore with collection_name. Or connect to an existing vectorstore in database if exists. Arguments should be the same as when the vectorstore was created.""" sample_embedding = embedding.embed_query("Hello pgvecto_rs!") return cls( embedding=embedding, dimension=len(sample_embedding), db_url=db_url, collection_name=collection_name, ) # ================ Insert interface ================= def add_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, **kwargs: Any, ) -> List[str]: """Run more texts through the embeddings and add to the vectorstore. Args: texts: Iterable of strings to add to the vectorstore. metadatas: Optional list of metadatas associated with the texts. kwargs: vectorstore specific parameters Returns: List of ids of the added texts. 
""" from pgvecto_rs.sdk import Record embeddings = self._embedding.embed_documents(list(texts)) records = [ Record.from_text(text, embedding, meta) for text, embedding, meta in zip(texts, embeddings, metadatas or []) ] self._store.insert(records) # type: ignore[union-attr] return [str(record.id) for record in records] def add_documents(self, documents: List[Document], **kwargs: Any) -> List[str]: """Run more documents through the embeddings and add to the vectorstore. Args: documents (List[Document]): List of documents to add to the vectorstore. Returns: List of ids of the added documents. """ return self.add_texts( [document.page_content for document in documents], [document.metadata for document in documents], **kwargs, ) # ================ Query interface ================= def similarity_search_with_score_by_vector( self, query_vector: List[float], k: int = 4, distance_func: Literal[ "sqrt_euclid", "neg_dot_prod", "ned_cos" ] = "sqrt_euclid", filter: Union[None, Dict[str, Any], Any] = None, **kwargs: Any, ) -> List[Tuple[Document, float]]: """Return docs most similar to query vector, with its score.""" from pgvecto_rs.sdk.filters import meta_contains distance_func_map = { "sqrt_euclid": "<->", "neg_dot_prod": "<#>", "ned_cos": "<=>", } if filter is None: real_filter = None elif isinstance(filter, dict): real_filter = meta_contains(filter) else: real_filter = filter results = self._store.search( # type: ignore[union-attr] query_vector, distance_func_map[distance_func], k, filter=real_filter, ) return [ ( Document( page_content=res[0].text, metadata=res[0].meta, ), res[1], ) for res in results ] def similarity_search_by_vector( self, embedding: List[float], k: int = 4, distance_func: Literal[ "sqrt_euclid", "neg_dot_prod", "ned_cos" ] = "sqrt_euclid", filter: Optional[Any] = None, **kwargs: Any, ) -> List[Document]: return [ doc for doc, _score in self.similarity_search_with_score_by_vector( embedding, k, distance_func, **kwargs ) ] def similarity_search_with_score( self, query: str, k: int = 4, distance_func: Literal[ "sqrt_euclid", "neg_dot_prod", "ned_cos" ] = "sqrt_euclid", **kwargs: Any, ) -> List[Tuple[Document, float]]: query_vector = self._embedding.embed_query(query) return self.similarity_search_with_score_by_vector( query_vector, k, distance_func, **kwargs ) def similarity_search( self, query: str, k: int = 4, distance_func: Literal[ "sqrt_euclid", "neg_dot_prod", "ned_cos" ] = "sqrt_euclid", **kwargs: Any, ) -> List[Document]: """Return docs most similar to query.""" query_vector = self._embedding.embed_query(query) return [ doc for doc, _score in self.similarity_search_with_score_by_vector( query_vector, k, distance_func, **kwargs ) ]
0
lc_public_repos/langchain/libs/community/langchain_community
lc_public_repos/langchain/libs/community/langchain_community/vectorstores/lancedb.py
from __future__ import annotations import base64 import os import uuid import warnings from typing import Any, Callable, Dict, Iterable, List, Optional, Type import numpy as np from langchain_core.documents import Document from langchain_core.embeddings import Embeddings from langchain_core.utils import guard_import from langchain_core.vectorstores import VectorStore from langchain_community.vectorstores.utils import maximal_marginal_relevance DEFAULT_K = 4 # Number of Documents to return. def import_lancedb() -> Any: """Import lancedb package.""" return guard_import("lancedb") def to_lance_filter(filter: Dict[str, str]) -> str: """Converts a dict filter to a LanceDB filter string.""" return " AND ".join([f"{k} = '{v}'" for k, v in filter.items()]) class LanceDB(VectorStore): """`LanceDB` vector store. To use, you should have ``lancedb`` python package installed. You can install it with ``pip install lancedb``. Args: connection: LanceDB connection to use. If not provided, a new connection will be created. embedding: Embedding to use for the vectorstore. vector_key: Key to use for the vector in the database. Defaults to ``vector``. id_key: Key to use for the id in the database. Defaults to ``id``. text_key: Key to use for the text in the database. Defaults to ``text``. table_name: Name of the table to use. Defaults to ``vectorstore``. api_key: API key to use for LanceDB cloud database. region: Region to use for LanceDB cloud database. mode: Mode to use for adding data to the table. Valid values are ``append`` and ``overwrite``. Defaults to ``overwrite``. Example: .. code-block:: python vectorstore = LanceDB(uri='/lancedb', embedding_function) vectorstore.add_texts(['text1', 'text2']) result = vectorstore.similarity_search('text1') """ def __init__( self, connection: Optional[Any] = None, embedding: Optional[Embeddings] = None, uri: Optional[str] = "/tmp/lancedb", vector_key: Optional[str] = "vector", id_key: Optional[str] = "id", text_key: Optional[str] = "text", table_name: Optional[str] = "vectorstore", api_key: Optional[str] = None, region: Optional[str] = None, mode: Optional[str] = "overwrite", table: Optional[Any] = None, distance: Optional[str] = "l2", reranker: Optional[Any] = None, relevance_score_fn: Optional[Callable[[float], float]] = None, limit: int = DEFAULT_K, ): """Initialize with Lance DB vectorstore""" lancedb = guard_import("lancedb") self._embedding = embedding self._vector_key = vector_key self._id_key = id_key self._text_key = text_key self.api_key = api_key or os.getenv("LANCE_API_KEY") if api_key != "" else None self.region = region self.mode = mode self.distance = distance self.override_relevance_score_fn = relevance_score_fn self.limit = limit self._fts_index = None if isinstance(reranker, lancedb.rerankers.Reranker): self._reranker = reranker elif reranker is None: self._reranker = None else: raise ValueError( "`reranker` has to be a lancedb.rerankers.Reranker object." ) if isinstance(uri, str) and self.api_key is None: if uri.startswith("db://"): raise ValueError("API key is required for LanceDB cloud.") if self._embedding is None: raise ValueError("embedding object should be provided") if isinstance(connection, lancedb.db.LanceDBConnection): self._connection = connection elif isinstance(connection, (str, lancedb.db.LanceTable)): raise ValueError( "`connection` has to be a lancedb.db.LanceDBConnection object.\ `lancedb.db.LanceTable` is deprecated." 
) else: if self.api_key is None: self._connection = lancedb.connect(uri) else: if isinstance(uri, str): if uri.startswith("db://"): self._connection = lancedb.connect( uri, api_key=self.api_key, region=self.region ) else: self._connection = lancedb.connect(uri) warnings.warn( "api key provided with local uri.\ The data will be stored locally" ) if table is not None: try: assert isinstance( table, (lancedb.db.LanceTable, lancedb.remote.table.RemoteTable) ) self._table = table self._table_name = ( table.name if hasattr(table, "name") else "remote_table" ) except AssertionError: raise ValueError( """`table` has to be a lancedb.db.LanceTable or lancedb.remote.table.RemoteTable object.""" ) else: self._table = self.get_table(table_name, set_default=True) def results_to_docs(self, results: Any, score: bool = False) -> Any: columns = results.schema.names if "_distance" in columns: score_col = "_distance" elif "_relevance_score" in columns: score_col = "_relevance_score" else: score_col = None if score_col is None or not score: return [ Document( page_content=results[self._text_key][idx].as_py(), metadata=results["metadata"][idx].as_py(), ) for idx in range(len(results)) ] elif score_col and score: return [ ( Document( page_content=results[self._text_key][idx].as_py(), metadata=results["metadata"][idx].as_py(), ), results[score_col][idx].as_py(), ) for idx in range(len(results)) ] @property def embeddings(self) -> Optional[Embeddings]: return self._embedding def add_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, **kwargs: Any, ) -> List[str]: """Turn texts into embedding and add it to the database Args: texts: Iterable of strings to add to the vectorstore. metadatas: Optional list of metadatas associated with the texts. ids: Optional list of ids to associate with the texts. ids: Optional list of ids to associate with the texts. Returns: List of ids of the added texts. """ docs = [] ids = ids or [str(uuid.uuid4()) for _ in texts] embeddings = self._embedding.embed_documents(list(texts)) # type: ignore for idx, text in enumerate(texts): embedding = embeddings[idx] metadata = metadatas[idx] if metadatas else {"id": ids[idx]} docs.append( { self._vector_key: embedding, self._id_key: ids[idx], self._text_key: text, "metadata": metadata, } ) tbl = self.get_table() if tbl is None: tbl = self._connection.create_table(self._table_name, data=docs) self._table = tbl else: if self.api_key is None: tbl.add(docs, mode=self.mode) else: tbl.add(docs) self._fts_index = None return ids def get_table( self, name: Optional[str] = None, set_default: Optional[bool] = False ) -> Any: """ Fetches a table object from the database. Args: name (str, optional): The name of the table to fetch. Defaults to None and fetches current table object. set_default (bool, optional): Sets fetched table as the default table. Defaults to False. Returns: Any: The fetched table object. Raises: ValueError: If the specified table is not found in the database. 
""" if name is not None: if set_default: self._table_name = name _name = self._table_name else: _name = name else: _name = self._table_name try: return self._connection.open_table(_name) except Exception: return None def create_index( self, col_name: Optional[str] = None, vector_col: Optional[str] = None, num_partitions: Optional[int] = 256, num_sub_vectors: Optional[int] = 96, index_cache_size: Optional[int] = None, metric: Optional[str] = "L2", name: Optional[str] = None, ) -> None: """ Create a scalar(for non-vector cols) or a vector index on a table. Make sure your vector column has enough data before creating an index on it. Args: vector_col: Provide if you want to create index on a vector column. col_name: Provide if you want to create index on a non-vector column. metric: Provide the metric to use for vector index. Defaults to 'L2' choice of metrics: 'L2', 'dot', 'cosine' num_partitions: Number of partitions to use for the index. Defaults to 256. num_sub_vectors: Number of sub-vectors to use for the index. Defaults to 96. index_cache_size: Size of the index cache. Defaults to None. name: Name of the table to create index on. Defaults to None. Returns: None """ tbl = self.get_table(name) if vector_col: tbl.create_index( metric=metric, vector_column_name=vector_col, num_partitions=num_partitions, num_sub_vectors=num_sub_vectors, index_cache_size=index_cache_size, ) elif col_name: tbl.create_scalar_index(col_name) else: raise ValueError("Provide either vector_col or col_name") def encode_image(self, uri: str) -> str: """Get base64 string from image URI.""" with open(uri, "rb") as image_file: return base64.b64encode(image_file.read()).decode("utf-8") def add_images( self, uris: List[str], metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, **kwargs: Any, ) -> List[str]: """Run more images through the embeddings and add to the vectorstore. Args: uris List[str]: File path to the image. metadatas (Optional[List[dict]], optional): Optional list of metadatas. ids (Optional[List[str]], optional): Optional list of IDs. Returns: List[str]: List of IDs of the added images. """ tbl = self.get_table() # Map from uris to b64 encoded strings b64_texts = [self.encode_image(uri=uri) for uri in uris] # Populate IDs if ids is None: ids = [str(uuid.uuid4()) for _ in uris] embeddings = None # Set embeddings if self._embedding is not None and hasattr(self._embedding, "embed_image"): embeddings = self._embedding.embed_image(uris=uris) else: raise ValueError( "embedding object should be provided and must have embed_image method." 
) data = [] for idx, emb in enumerate(embeddings): metadata = metadatas[idx] if metadatas else {"id": ids[idx]} data.append( { self._vector_key: emb, self._id_key: ids[idx], self._text_key: b64_texts[idx], "metadata": metadata, } ) if tbl is None: tbl = self._connection.create_table(self._table_name, data=data) self._table = tbl else: tbl.add(data) return ids def _query( self, query: Any, k: Optional[int] = None, filter: Optional[Any] = None, name: Optional[str] = None, **kwargs: Any, ) -> Any: if k is None: k = self.limit tbl = self.get_table(name) if isinstance(filter, dict): filter = to_lance_filter(filter) prefilter = kwargs.get("prefilter", False) query_type = kwargs.get("query_type", "vector") if metrics := kwargs.get("metrics"): lance_query = ( tbl.search(query=query, vector_column_name=self._vector_key) .limit(k) .metric(metrics) .where(filter, prefilter=prefilter) ) else: lance_query = ( tbl.search(query=query, vector_column_name=self._vector_key) .limit(k) .where(filter, prefilter=prefilter) ) if query_type == "hybrid" and self._reranker is not None: lance_query.rerank(reranker=self._reranker) docs = lance_query.to_arrow() if len(docs) == 0: warnings.warn("No results found for the query.") return docs def _select_relevance_score_fn(self) -> Callable[[float], float]: """ The 'correct' relevance function may differ depending on a few things, including: - the distance / similarity metric used by the VectorStore - the scale of your embeddings (OpenAI's are unit normed. Many others are not!) - embedding dimensionality - etc. """ if self.override_relevance_score_fn: return self.override_relevance_score_fn if self.distance == "cosine": return self._cosine_relevance_score_fn elif self.distance == "l2": return self._euclidean_relevance_score_fn elif self.distance == "ip": return self._max_inner_product_relevance_score_fn else: raise ValueError( "No supported normalization function" f" for distance metric of type: {self.distance}." "Consider providing relevance_score_fn to Chroma constructor." ) def similarity_search_by_vector( self, embedding: List[float], k: Optional[int] = None, filter: Optional[Dict[str, str]] = None, name: Optional[str] = None, **kwargs: Any, ) -> Any: """ Return documents most similar to the query vector. """ if k is None: k = self.limit res = self._query(embedding, k, filter=filter, name=name, **kwargs) return self.results_to_docs(res, score=kwargs.pop("score", False)) def similarity_search_by_vector_with_relevance_scores( self, embedding: List[float], k: Optional[int] = None, filter: Optional[Dict[str, str]] = None, name: Optional[str] = None, **kwargs: Any, ) -> Any: """ Return documents most similar to the query vector with relevance scores. 
""" if k is None: k = self.limit relevance_score_fn = self._select_relevance_score_fn() docs_and_scores = self.similarity_search_by_vector( embedding, k, score=True, **kwargs ) return [ (doc, relevance_score_fn(float(score))) for doc, score in docs_and_scores ] def similarity_search_with_score( self, query: str, k: Optional[int] = None, filter: Optional[Dict[str, str]] = None, **kwargs: Any, ) -> Any: """Return documents most similar to the query with relevance scores.""" if k is None: k = self.limit score = kwargs.get("score", True) name = kwargs.get("name", None) query_type = kwargs.get("query_type", "vector") if self._embedding is None: raise ValueError("search needs an emmbedding function to be specified.") if query_type == "fts" or query_type == "hybrid": if self.api_key is None and self._fts_index is None: tbl = self.get_table(name) self._fts_index = tbl.create_fts_index(self._text_key, replace=True) if query_type == "hybrid": embedding = self._embedding.embed_query(query) _query = (embedding, query) else: _query = query # type: ignore res = self._query(_query, k, filter=filter, name=name, **kwargs) return self.results_to_docs(res, score=score) else: raise NotImplementedError( "Full text/ Hybrid search is not supported in LanceDB Cloud yet." ) else: embedding = self._embedding.embed_query(query) res = self._query(embedding, k, filter=filter, **kwargs) return self.results_to_docs(res, score=score) def similarity_search( self, query: str, k: Optional[int] = None, name: Optional[str] = None, filter: Optional[Any] = None, fts: Optional[bool] = False, **kwargs: Any, ) -> List[Document]: """Return documents most similar to the query Args: query: String to query the vectorstore with. k: Number of documents to return. filter (Optional[Dict]): Optional filter arguments sql_filter(Optional[string]): SQL filter to apply to the query. prefilter(Optional[bool]): Whether to apply the filter prior to the vector search. Raises: ValueError: If the specified table is not found in the database. Returns: List of documents most similar to the query. """ res = self.similarity_search_with_score( query=query, k=k, name=name, filter=filter, fts=fts, score=False, **kwargs ) return res def max_marginal_relevance_search( self, query: str, k: Optional[int] = None, fetch_k: int = 20, lambda_mult: float = 0.5, filter: Optional[Dict[str, str]] = None, **kwargs: Any, ) -> List[Document]: """Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. fetch_k: Number of Documents to fetch to pass to MMR algorithm. lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. Returns: List of Documents selected by maximal marginal relevance. """ if k is None: k = self.limit if self._embedding is None: raise ValueError( "For MMR search, you must specify an embedding function on" "creation." 
) embedding = self._embedding.embed_query(query) docs = self.max_marginal_relevance_search_by_vector( embedding, k, fetch_k, lambda_mult=lambda_mult, filter=filter, ) return docs def max_marginal_relevance_search_by_vector( self, embedding: List[float], k: Optional[int] = None, fetch_k: int = 20, lambda_mult: float = 0.5, filter: Optional[Dict[str, str]] = None, **kwargs: Any, ) -> List[Document]: """Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. fetch_k: Number of Documents to fetch to pass to MMR algorithm. lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. Returns: List of Documents selected by maximal marginal relevance. """ results = self._query( query=embedding, k=fetch_k, filter=filter, **kwargs, ) mmr_selected = maximal_marginal_relevance( np.array(embedding, dtype=np.float32), results["vector"].to_pylist(), k=k or self.limit, lambda_mult=lambda_mult, ) candidates = self.results_to_docs(results) selected_results = [r for i, r in enumerate(candidates) if i in mmr_selected] return selected_results @classmethod def from_texts( cls: Type[LanceDB], texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, connection: Optional[Any] = None, vector_key: Optional[str] = "vector", id_key: Optional[str] = "id", text_key: Optional[str] = "text", table_name: Optional[str] = "vectorstore", api_key: Optional[str] = None, region: Optional[str] = None, mode: Optional[str] = "overwrite", distance: Optional[str] = "l2", reranker: Optional[Any] = None, relevance_score_fn: Optional[Callable[[float], float]] = None, **kwargs: Any, ) -> LanceDB: instance = LanceDB( connection=connection, embedding=embedding, vector_key=vector_key, id_key=id_key, text_key=text_key, table_name=table_name, api_key=api_key, region=region, mode=mode, distance=distance, reranker=reranker, relevance_score_fn=relevance_score_fn, **kwargs, ) instance.add_texts(texts, metadatas=metadatas) return instance def delete( self, ids: Optional[List[str]] = None, delete_all: Optional[bool] = None, filter: Optional[str] = None, drop_columns: Optional[List[str]] = None, name: Optional[str] = None, **kwargs: Any, ) -> None: """ Delete rows by filter, by ids, or drop columns from the table. Args: filter: Provide a string SQL expression - "{col} {operation} {value}". ids: Provide list of ids to delete from the table. drop_columns: Provide list of columns to drop from the table. delete_all: If True, delete all rows from the table. """ tbl = self.get_table(name) if filter: tbl.delete(filter) elif ids: tbl.delete("id in ({})".format(",".join("'{}'".format(i) for i in ids))) elif drop_columns: if self.api_key is not None: raise NotImplementedError( "Column operations currently not supported in LanceDB Cloud." ) else: tbl.drop_columns(drop_columns) elif delete_all: tbl.delete("true") else: raise ValueError("Provide either filter, ids, drop_columns or delete_all")
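A hedged usage sketch for the LanceDB store above, assuming the `lancedb` package is installed; the local URI is a placeholder, and `FakeEmbeddings` stands in for a real embedding model (for LanceDB Cloud, a `db://` URI plus `api_key` would be used instead).

from langchain_community.embeddings.fake import FakeEmbeddings

# Placeholder local database path; the table is created on first add.
store = LanceDB.from_texts(
    texts=["alpha", "beta", "gamma"],
    embedding=FakeEmbeddings(size=8),
    uri="/tmp/lancedb-demo",
)
print(store.similarity_search("alpha", k=2))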
0
lc_public_repos/langchain/libs/community/langchain_community
lc_public_repos/langchain/libs/community/langchain_community/vectorstores/sklearn.py
"""Wrapper around scikit-learn NearestNeighbors implementation. The vector store can be persisted in json, bson or parquet format. """ import json import math import os from abc import ABC, abstractmethod from typing import Any, Dict, Iterable, List, Literal, Optional, Tuple, Type from uuid import uuid4 from langchain_core.documents import Document from langchain_core.embeddings import Embeddings from langchain_core.utils import guard_import from langchain_core.vectorstores import VectorStore from langchain_community.vectorstores.utils import maximal_marginal_relevance DEFAULT_K = 4 # Number of Documents to return. DEFAULT_FETCH_K = 20 # Number of Documents to initially fetch during MMR search. class BaseSerializer(ABC): """Base class for serializing data.""" def __init__(self, persist_path: str) -> None: self.persist_path = persist_path @classmethod @abstractmethod def extension(cls) -> str: """The file extension suggested by this serializer (without dot).""" @abstractmethod def save(self, data: Any) -> None: """Saves the data to the persist_path""" @abstractmethod def load(self) -> Any: """Loads the data from the persist_path""" class JsonSerializer(BaseSerializer): """Serialize data in JSON using the json package from python standard library.""" @classmethod def extension(cls) -> str: return "json" def save(self, data: Any) -> None: with open(self.persist_path, "w") as fp: json.dump(data, fp) def load(self) -> Any: with open(self.persist_path, "r") as fp: return json.load(fp) class BsonSerializer(BaseSerializer): """Serialize data in Binary JSON using the `bson` python package.""" def __init__(self, persist_path: str) -> None: super().__init__(persist_path) self.bson = guard_import("bson") @classmethod def extension(cls) -> str: return "bson" def save(self, data: Any) -> None: with open(self.persist_path, "wb") as fp: fp.write(self.bson.dumps(data)) def load(self) -> Any: with open(self.persist_path, "rb") as fp: return self.bson.loads(fp.read()) class ParquetSerializer(BaseSerializer): """Serialize data in `Apache Parquet` format using the `pyarrow` package.""" def __init__(self, persist_path: str) -> None: super().__init__(persist_path) self.pd = guard_import("pandas") self.pa = guard_import("pyarrow") self.pq = guard_import("pyarrow.parquet") @classmethod def extension(cls) -> str: return "parquet" def save(self, data: Any) -> None: df = self.pd.DataFrame(data) table = self.pa.Table.from_pandas(df) if os.path.exists(self.persist_path): backup_path = str(self.persist_path) + "-backup" os.rename(self.persist_path, backup_path) try: self.pq.write_table(table, self.persist_path) except Exception as exc: os.rename(backup_path, self.persist_path) raise exc else: os.remove(backup_path) else: self.pq.write_table(table, self.persist_path) def load(self) -> Any: table = self.pq.read_table(self.persist_path) df = table.to_pandas() return {col: series.tolist() for col, series in df.items()} SERIALIZER_MAP: Dict[str, Type[BaseSerializer]] = { "json": JsonSerializer, "bson": BsonSerializer, "parquet": ParquetSerializer, } class SKLearnVectorStoreException(RuntimeError): """Exception raised by SKLearnVectorStore.""" pass class SKLearnVectorStore(VectorStore): """Simple in-memory vector store based on the `scikit-learn` library `NearestNeighbors`.""" def __init__( self, embedding: Embeddings, *, persist_path: Optional[str] = None, serializer: Literal["json", "bson", "parquet"] = "json", metric: str = "cosine", **kwargs: Any, ) -> None: np = guard_import("numpy") sklearn_neighbors = 
guard_import("sklearn.neighbors", pip_name="scikit-learn") # non-persistent properties self._np = np self._neighbors = sklearn_neighbors.NearestNeighbors(metric=metric, **kwargs) self._neighbors_fitted = False self._embedding_function = embedding self._persist_path = persist_path self._serializer: Optional[BaseSerializer] = None if self._persist_path is not None: serializer_cls = SERIALIZER_MAP[serializer] self._serializer = serializer_cls(persist_path=self._persist_path) # data properties self._embeddings: List[List[float]] = [] self._texts: List[str] = [] self._metadatas: List[dict] = [] self._ids: List[str] = [] # cache properties self._embeddings_np: Any = np.asarray([]) if self._persist_path is not None and os.path.isfile(self._persist_path): self._load() @property def embeddings(self) -> Embeddings: return self._embedding_function def persist(self) -> None: if self._serializer is None: raise SKLearnVectorStoreException( "You must specify a persist_path on creation to persist the " "collection." ) data = { "ids": self._ids, "texts": self._texts, "metadatas": self._metadatas, "embeddings": self._embeddings, } self._serializer.save(data) def _load(self) -> None: if self._serializer is None: raise SKLearnVectorStoreException( "You must specify a persist_path on creation to load the " "collection." ) data = self._serializer.load() self._embeddings = data["embeddings"] self._texts = data["texts"] self._metadatas = data["metadatas"] self._ids = data["ids"] self._update_neighbors() def add_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, **kwargs: Any, ) -> List[str]: _texts = list(texts) _ids = ids or [str(uuid4()) for _ in _texts] self._texts.extend(_texts) self._embeddings.extend(self._embedding_function.embed_documents(_texts)) self._metadatas.extend(metadatas or ([{}] * len(_texts))) self._ids.extend(_ids) self._update_neighbors() return _ids def _update_neighbors(self) -> None: if len(self._embeddings) == 0: raise SKLearnVectorStoreException( "No data was added to SKLearnVectorStore." ) self._embeddings_np = self._np.asarray(self._embeddings) self._neighbors.fit(self._embeddings_np) self._neighbors_fitted = True def _similarity_index_search_with_score( self, query_embedding: List[float], *, k: int = DEFAULT_K, **kwargs: Any ) -> List[Tuple[int, float]]: """Search k embeddings similar to the query embedding. Returns a list of (index, distance) tuples.""" if not self._neighbors_fitted: raise SKLearnVectorStoreException( "No data was added to SKLearnVectorStore." 
) neigh_dists, neigh_idxs = self._neighbors.kneighbors( [query_embedding], n_neighbors=k ) return list(zip(neigh_idxs[0], neigh_dists[0])) def similarity_search_with_score( self, query: str, *, k: int = DEFAULT_K, **kwargs: Any ) -> List[Tuple[Document, float]]: query_embedding = self._embedding_function.embed_query(query) indices_dists = self._similarity_index_search_with_score( query_embedding, k=k, **kwargs ) return [ ( Document( page_content=self._texts[idx], metadata={"id": self._ids[idx], **self._metadatas[idx]}, ), dist, ) for idx, dist in indices_dists ] def similarity_search( self, query: str, k: int = DEFAULT_K, **kwargs: Any ) -> List[Document]: docs_scores = self.similarity_search_with_score(query, k=k, **kwargs) return [doc for doc, _ in docs_scores] def _similarity_search_with_relevance_scores( self, query: str, k: int = DEFAULT_K, **kwargs: Any ) -> List[Tuple[Document, float]]: docs_dists = self.similarity_search_with_score(query, k=k, **kwargs) docs, dists = zip(*docs_dists) scores = [1 / math.exp(dist) for dist in dists] return list(zip(list(docs), scores)) def max_marginal_relevance_search_by_vector( self, embedding: List[float], k: int = DEFAULT_K, fetch_k: int = DEFAULT_FETCH_K, lambda_mult: float = 0.5, **kwargs: Any, ) -> List[Document]: """Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. fetch_k: Number of Documents to fetch to pass to MMR algorithm. lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. Returns: List of Documents selected by maximal marginal relevance. """ indices_dists = self._similarity_index_search_with_score( embedding, k=fetch_k, **kwargs ) indices, _ = zip(*indices_dists) result_embeddings = self._embeddings_np[indices,] mmr_selected = maximal_marginal_relevance( self._np.array(embedding, dtype=self._np.float32), result_embeddings, k=k, lambda_mult=lambda_mult, ) mmr_indices = [indices[i] for i in mmr_selected] return [ Document( page_content=self._texts[idx], metadata={"id": self._ids[idx], **self._metadatas[idx]}, ) for idx in mmr_indices ] def max_marginal_relevance_search( self, query: str, k: int = DEFAULT_K, fetch_k: int = DEFAULT_FETCH_K, lambda_mult: float = 0.5, **kwargs: Any, ) -> List[Document]: """Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. fetch_k: Number of Documents to fetch to pass to MMR algorithm. lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. Returns: List of Documents selected by maximal marginal relevance. """ if self._embedding_function is None: raise ValueError( "For MMR search, you must specify an embedding function on creation." 
            )
        embedding = self._embedding_function.embed_query(query)
        docs = self.max_marginal_relevance_search_by_vector(
            embedding, k, fetch_k, lambda_mult=lambda_mult
        )
        return docs

    @classmethod
    def from_texts(
        cls,
        texts: List[str],
        embedding: Embeddings,
        metadatas: Optional[List[dict]] = None,
        ids: Optional[List[str]] = None,
        persist_path: Optional[str] = None,
        **kwargs: Any,
    ) -> "SKLearnVectorStore":
        vs = SKLearnVectorStore(embedding, persist_path=persist_path, **kwargs)
        vs.add_texts(texts, metadatas=metadatas, ids=ids)
        return vs
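

# ---------------------------------------------------------------------------
# Usage sketch (illustrative only, not part of the class above): a minimal
# round trip through SKLearnVectorStore. ``TinyEmbeddings`` is a hypothetical
# stand-in embedding defined just for this demo; in practice any
# ``langchain_core.embeddings.Embeddings`` implementation can be passed.
# ---------------------------------------------------------------------------
if __name__ == "__main__":

    class TinyEmbeddings(Embeddings):
        """Hypothetical embedding: maps a text to a stable 4-dim vector."""

        def _vec(self, text: str) -> List[float]:
            # Hash character codes into four buckets for a deterministic,
            # dependency-free pseudo-embedding.
            buckets = [0.0, 0.0, 0.0, 0.0]
            for i, ch in enumerate(text):
                buckets[i % 4] += ord(ch) / 1000.0
            return buckets

        def embed_documents(self, texts: List[str]) -> List[List[float]]:
            return [self._vec(t) for t in texts]

        def embed_query(self, text: str) -> List[float]:
            return self._vec(text)

    store = SKLearnVectorStore.from_texts(
        texts=["apples are red", "bananas are yellow", "the sky is blue"],
        embedding=TinyEmbeddings(),
    )
    # Nearest neighbors by embedding distance; k must not exceed the number
    # of stored texts.
    for doc in store.similarity_search("fruit colors", k=2):
        print(doc.page_content)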
0
lc_public_repos/langchain/libs/community/langchain_community
lc_public_repos/langchain/libs/community/langchain_community/vectorstores/cassandra.py
from __future__ import annotations

import asyncio
import importlib.metadata
import typing
import uuid
import warnings
from typing import (
    Any,
    Awaitable,
    Callable,
    Dict,
    Iterable,
    List,
    Optional,
    Tuple,
    Type,
    TypeVar,
    Union,
)

import numpy as np
from packaging.version import Version  # this is a langchain-core dependency

if typing.TYPE_CHECKING:
    from cassandra.cluster import Session

from langchain_core.documents import Document
from langchain_core.embeddings import Embeddings
from langchain_core.vectorstores import VectorStore, VectorStoreRetriever

from langchain_community.utilities.cassandra import SetupMode
from langchain_community.vectorstores.utils import maximal_marginal_relevance

CVST = TypeVar("CVST", bound="Cassandra")

MIN_CASSIO_VERSION = Version("0.1.10")


class Cassandra(VectorStore):
    _embedding_dimension: Union[int, None]

    def _get_embedding_dimension(self) -> int:
        if self._embedding_dimension is None:
            self._embedding_dimension = len(
                self.embedding.embed_query("This is a sample sentence.")
            )
        return self._embedding_dimension

    async def _aget_embedding_dimension(self) -> int:
        if self._embedding_dimension is None:
            self._embedding_dimension = len(
                await self.embedding.aembed_query("This is a sample sentence.")
            )
        return self._embedding_dimension

    def __init__(
        self,
        embedding: Embeddings,
        session: Optional[Session] = None,
        keyspace: Optional[str] = None,
        table_name: str = "",
        ttl_seconds: Optional[int] = None,
        *,
        body_index_options: Optional[List[Tuple[str, Any]]] = None,
        setup_mode: SetupMode = SetupMode.SYNC,
        metadata_indexing: Union[Tuple[str, Iterable[str]], str] = "all",
    ) -> None:
        """Apache Cassandra(R) for vector-store workloads.

        To use it, you need a recent installation of the `cassio` library
        and a Cassandra cluster / Astra DB instance supporting vector
        capabilities.

        Visit the cassio.org website for extensive quickstarts and code
        examples.

        Example:
            .. code-block:: python

                from langchain_community.vectorstores import Cassandra
                from langchain_openai import OpenAIEmbeddings

                embeddings = OpenAIEmbeddings()
                session = ...  # create your Cassandra session object
                keyspace = 'my_keyspace'  # the keyspace should exist already
                table_name = 'my_vector_store'
                vectorstore = Cassandra(embeddings, session, keyspace, table_name)

        Args:
            embedding: Embedding function to use.
            session: Cassandra driver session. If not provided, it is resolved from
                cassio.
            keyspace: Cassandra keyspace. If not provided, it is resolved from cassio.
            table_name: Cassandra table (required).
            ttl_seconds: Optional time-to-live for the added texts.
            body_index_options: Optional options used to create the body index.
                Eg. body_index_options = [cassio.table.cql.STANDARD_ANALYZER]
            setup_mode: mode used to create the Cassandra table (SYNC,
                ASYNC or OFF).
            metadata_indexing: Optional specification of a metadata indexing policy,
                i.e. to fine-tune which of the metadata fields are indexed.
                It can be a string ("all" or "none"), or a 2-tuple. The following
                means that all fields except 'f1', 'f2' ... are NOT indexed:
                    metadata_indexing=("allowlist", ["f1", "f2", ...])
                The following means all fields EXCEPT 'g1', 'g2', ... are indexed:
                    metadata_indexing=("denylist", ["g1", "g2", ...])
                The default is to index every metadata field.
                Note: if you plan to have massive unique text metadata entries,
                consider not indexing them for performance
                (and to overcome max-length limitations).
        """
        try:
            from cassio.table import MetadataVectorCassandraTable
        except (ImportError, ModuleNotFoundError):
            raise ImportError(
                "Could not import cassio python package. 
" "Please install it with `pip install cassio`." ) cassio_version = Version(importlib.metadata.version("cassio")) if cassio_version is not None and cassio_version < MIN_CASSIO_VERSION: msg = ( "Cassio version not supported. Please upgrade cassio " f"to version {MIN_CASSIO_VERSION} or higher." ) raise ImportError(msg) if not table_name: raise ValueError("Missing required parameter 'table_name'.") self.embedding = embedding self.session = session self.keyspace = keyspace self.table_name = table_name self.ttl_seconds = ttl_seconds # self._embedding_dimension = None # kwargs: Dict[str, Any] = {} if body_index_options is not None: kwargs["body_index_options"] = body_index_options if setup_mode == SetupMode.ASYNC: kwargs["async_setup"] = True embedding_dimension: Union[int, Awaitable[int], None] = None if setup_mode == SetupMode.ASYNC: embedding_dimension = self._aget_embedding_dimension() elif setup_mode == SetupMode.SYNC: embedding_dimension = self._get_embedding_dimension() self.table = MetadataVectorCassandraTable( session=session, keyspace=keyspace, table=table_name, vector_dimension=embedding_dimension, metadata_indexing=metadata_indexing, primary_key_type="TEXT", skip_provisioning=setup_mode == SetupMode.OFF, **kwargs, ) if self.session is None: self.session = self.table.session @property def embeddings(self) -> Embeddings: return self.embedding def _select_relevance_score_fn(self) -> Callable[[float], float]: """ The underlying VectorTable already returns a "score proper", i.e. one in [0, 1] where higher means more *similar*, so here the final score transformation is not reversing the interval: """ return lambda score: score def delete_collection(self) -> None: """ Just an alias for `clear` (to better align with other VectorStore implementations). """ self.clear() async def adelete_collection(self) -> None: """ Just an alias for `aclear` (to better align with other VectorStore implementations). """ await self.aclear() def clear(self) -> None: """Empty the table.""" self.table.clear() async def aclear(self) -> None: """Empty the table.""" await self.table.aclear() def delete_by_document_id(self, document_id: str) -> None: """Delete by document ID. Args: document_id: the document ID to delete. """ return self.table.delete(row_id=document_id) async def adelete_by_document_id(self, document_id: str) -> None: """Delete by document ID. Args: document_id: the document ID to delete. """ return await self.table.adelete(row_id=document_id) def delete(self, ids: Optional[List[str]] = None, **kwargs: Any) -> Optional[bool]: """Delete by vector IDs. Args: ids: List of ids to delete. Returns: Optional[bool]: True if deletion is successful, False otherwise, None if not implemented. """ if ids is None: raise ValueError("No ids provided to delete.") for document_id in ids: self.delete_by_document_id(document_id) return True async def adelete( self, ids: Optional[List[str]] = None, **kwargs: Any ) -> Optional[bool]: """Delete by vector IDs. Args: ids: List of ids to delete. Returns: Optional[bool]: True if deletion is successful, False otherwise, None if not implemented. """ if ids is None: raise ValueError("No ids provided to delete.") for document_id in ids: await self.adelete_by_document_id(document_id) return True def delete_by_metadata_filter( self, filter: dict[str, Any], *, batch_size: int = 50, ) -> int: """Delete all documents matching a certain metadata filtering condition. 
This operation does not use the vector embeddings in any way, it simply removes all documents whose metadata match the provided condition. Args: filter: Filter on the metadata to apply. The filter cannot be empty. batch_size: amount of deletions per each batch (until exhaustion of the matching documents). Returns: A number expressing the amount of deleted documents. """ if not filter: msg = ( "Method `delete_by_metadata_filter` does not accept an empty " "filter. Use the `clear()` method if you really want to empty " "the vector store." ) raise ValueError(msg) return self.table.find_and_delete_entries( metadata=filter, batch_size=batch_size, ) async def adelete_by_metadata_filter( self, filter: dict[str, Any], *, batch_size: int = 50, ) -> int: """Delete all documents matching a certain metadata filtering condition. This operation does not use the vector embeddings in any way, it simply removes all documents whose metadata match the provided condition. Args: filter: Filter on the metadata to apply. The filter cannot be empty. batch_size: amount of deletions per each batch (until exhaustion of the matching documents). Returns: A number expressing the amount of deleted documents. """ if not filter: msg = ( "Method `delete_by_metadata_filter` does not accept an empty " "filter. Use the `clear()` method if you really want to empty " "the vector store." ) raise ValueError(msg) return await self.table.afind_and_delete_entries( metadata=filter, batch_size=batch_size, ) def add_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, batch_size: int = 16, ttl_seconds: Optional[int] = None, **kwargs: Any, ) -> List[str]: """Run more texts through the embeddings and add to the vectorstore. Args: texts: Texts to add to the vectorstore. metadatas: Optional list of metadatas. ids: Optional list of IDs. batch_size: Number of concurrent requests to send to the server. ttl_seconds: Optional time-to-live for the added texts. Returns: List[str]: List of IDs of the added texts. """ _texts = list(texts) ids = ids or [uuid.uuid4().hex for _ in _texts] metadatas = metadatas or [{}] * len(_texts) ttl_seconds = ttl_seconds or self.ttl_seconds embedding_vectors = self.embedding.embed_documents(_texts) for i in range(0, len(_texts), batch_size): batch_texts = _texts[i : i + batch_size] batch_embedding_vectors = embedding_vectors[i : i + batch_size] batch_ids = ids[i : i + batch_size] batch_metadatas = metadatas[i : i + batch_size] futures = [ self.table.put_async( row_id=text_id, body_blob=text, vector=embedding_vector, metadata=metadata or {}, ttl_seconds=ttl_seconds, ) for text, embedding_vector, text_id, metadata in zip( batch_texts, batch_embedding_vectors, batch_ids, batch_metadatas ) ] for future in futures: future.result() return ids async def aadd_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, concurrency: int = 16, ttl_seconds: Optional[int] = None, **kwargs: Any, ) -> List[str]: """Run more texts through the embeddings and add to the vectorstore. Args: texts: Texts to add to the vectorstore. metadatas: Optional list of metadatas. ids: Optional list of IDs. concurrency: Number of concurrent queries to the database. Defaults to 16. ttl_seconds: Optional time-to-live for the added texts. Returns: List[str]: List of IDs of the added texts. 
""" _texts = list(texts) ids = ids or [uuid.uuid4().hex for _ in _texts] _metadatas: List[dict] = metadatas or [{}] * len(_texts) ttl_seconds = ttl_seconds or self.ttl_seconds embedding_vectors = await self.embedding.aembed_documents(_texts) sem = asyncio.Semaphore(concurrency) async def send_concurrently( row_id: str, text: str, embedding_vector: List[float], metadata: dict ) -> None: async with sem: await self.table.aput( row_id=row_id, body_blob=text, vector=embedding_vector, metadata=metadata or {}, ttl_seconds=ttl_seconds, ) for i in range(0, len(_texts)): tasks = [ asyncio.create_task( send_concurrently( ids[i], _texts[i], embedding_vectors[i], _metadatas[i] ) ) ] await asyncio.gather(*tasks) return ids def replace_metadata( self, id_to_metadata: dict[str, dict], *, batch_size: int = 50, ) -> None: """Replace the metadata of documents. For each document to update, identified by its ID, the new metadata dictionary completely replaces what is on the store. This includes passing empty metadata `{}` to erase the currently-stored information. Args: id_to_metadata: map from the Document IDs to modify to the new metadata for updating. Keys in this dictionary that do not correspond to an existing document will not cause an error, rather will result in new rows being written into the Cassandra table but without an associated vector: hence unreachable through vector search. batch_size: Number of concurrent requests to send to the server. Returns: None if the writes succeed (otherwise an error is raised). """ ids_and_metadatas = list(id_to_metadata.items()) for i in range(0, len(ids_and_metadatas), batch_size): batch_i_m = ids_and_metadatas[i : i + batch_size] futures = [ self.table.put_async( row_id=doc_id, metadata=doc_md, ) for doc_id, doc_md in batch_i_m ] for future in futures: future.result() return async def areplace_metadata( self, id_to_metadata: dict[str, dict], *, concurrency: int = 50, ) -> None: """Replace the metadata of documents. For each document to update, identified by its ID, the new metadata dictionary completely replaces what is on the store. This includes passing empty metadata `{}` to erase the currently-stored information. Args: id_to_metadata: map from the Document IDs to modify to the new metadata for updating. Keys in this dictionary that do not correspond to an existing document will not cause an error, rather will result in new rows being written into the Cassandra table but without an associated vector: hence unreachable through vector search. concurrency: Number of concurrent queries to the database. Defaults to 50. Returns: None if the writes succeed (otherwise an error is raised). """ ids_and_metadatas = list(id_to_metadata.items()) sem = asyncio.Semaphore(concurrency) async def send_concurrently(doc_id: str, doc_md: dict) -> None: async with sem: await self.table.aput( row_id=doc_id, metadata=doc_md, ) for doc_id, doc_md in ids_and_metadatas: tasks = [asyncio.create_task(send_concurrently(doc_id, doc_md))] await asyncio.gather(*tasks) return @staticmethod def _row_to_document(row: Dict[str, Any]) -> Document: return Document( id=row["row_id"], page_content=row["body_blob"], metadata=row["metadata"], ) def get_by_document_id(self, document_id: str) -> Document | None: """Retrieve a single document from the store, given its document ID. Args: document_id: The document ID Returns: The the document if it exists. Otherwise None. 
""" row = self.table.get(row_id=document_id) if row is None: return None return self._row_to_document(row=row) async def aget_by_document_id(self, document_id: str) -> Document | None: """Retrieve a single document from the store, given its document ID. Args: document_id: The document ID Returns: The the document if it exists. Otherwise None. """ row = await self.table.aget(row_id=document_id) if row is None: return None return self._row_to_document(row=row) def metadata_search( self, filter: dict[str, Any] = {}, # noqa: B006 n: int = 5, ) -> Iterable[Document]: """Get documents via a metadata search. Args: filter: the metadata to query for. n: the maximum number of documents to return. """ rows = self.table.find_entries(metadata=filter, n=n) return [self._row_to_document(row=row) for row in rows if row] async def ametadata_search( self, filter: dict[str, Any] = {}, # noqa: B006 n: int = 5, ) -> Iterable[Document]: """Get documents via a metadata search. Args: filter: the metadata to query for. n: the maximum number of documents to return. """ rows = await self.table.afind_entries(metadata=filter, n=n) return [self._row_to_document(row=row) for row in rows] async def asimilarity_search_with_embedding_id_by_vector( self, embedding: List[float], k: int = 4, filter: Optional[Dict[str, str]] = None, body_search: Optional[Union[str, List[str]]] = None, ) -> List[Tuple[Document, List[float], str]]: """Return docs most similar to embedding vector. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. filter: Filter on the metadata to apply. body_search: Document textual search terms to apply. Only supported by Astra DB at the moment. Returns: List of (Document, embedding, id), the most similar to the query vector. """ kwargs: Dict[str, Any] = {} if filter is not None: kwargs["metadata"] = filter if body_search is not None: kwargs["body_search"] = body_search hits = await self.table.aann_search( vector=embedding, n=k, **kwargs, ) return [ ( self._row_to_document(row=hit), hit["vector"], hit["row_id"], ) for hit in hits ] @staticmethod def _search_to_documents( hits: Iterable[Dict[str, Any]], ) -> List[Tuple[Document, float, str]]: # We stick to 'cos' distance as it can be normalized on a 0-1 axis # (1=most relevant), as required by this class' contract. return [ ( Cassandra._row_to_document(row=hit), 0.5 + 0.5 * hit["distance"], hit["row_id"], ) for hit in hits ] # id-returning search facilities def similarity_search_with_score_id_by_vector( self, embedding: List[float], k: int = 4, filter: Optional[Dict[str, str]] = None, body_search: Optional[Union[str, List[str]]] = None, ) -> List[Tuple[Document, float, str]]: """Return docs most similar to embedding vector. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. filter: Filter on the metadata to apply. body_search: Document textual search terms to apply. Only supported by Astra DB at the moment. Returns: List of (Document, score, id), the most similar to the query vector. 
""" kwargs: Dict[str, Any] = {} if filter is not None: kwargs["metadata"] = filter if body_search is not None: kwargs["body_search"] = body_search hits = self.table.metric_ann_search( vector=embedding, n=k, metric="cos", **kwargs, ) return self._search_to_documents(hits) async def asimilarity_search_with_score_id_by_vector( self, embedding: List[float], k: int = 4, filter: Optional[Dict[str, str]] = None, body_search: Optional[Union[str, List[str]]] = None, ) -> List[Tuple[Document, float, str]]: """Return docs most similar to embedding vector. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. filter: Filter on the metadata to apply. body_search: Document textual search terms to apply. Only supported by Astra DB at the moment. Returns: List of (Document, score, id), the most similar to the query vector. """ kwargs: Dict[str, Any] = {} if filter is not None: kwargs["metadata"] = filter if body_search is not None: kwargs["body_search"] = body_search hits = await self.table.ametric_ann_search( vector=embedding, n=k, metric="cos", **kwargs, ) return self._search_to_documents(hits) def similarity_search_with_score_id( self, query: str, k: int = 4, filter: Optional[Dict[str, str]] = None, body_search: Optional[Union[str, List[str]]] = None, ) -> List[Tuple[Document, float, str]]: """Return docs most similar to query. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. filter: Filter on the metadata to apply. body_search: Document textual search terms to apply. Only supported by Astra DB at the moment. Returns: List of (Document, score, id), the most similar to the query vector. """ embedding_vector = self.embedding.embed_query(query) return self.similarity_search_with_score_id_by_vector( embedding=embedding_vector, k=k, filter=filter, body_search=body_search, ) async def asimilarity_search_with_score_id( self, query: str, k: int = 4, filter: Optional[Dict[str, str]] = None, body_search: Optional[Union[str, List[str]]] = None, ) -> List[Tuple[Document, float, str]]: """Return docs most similar to query. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. filter: Filter on the metadata to apply. body_search: Document textual search terms to apply. Only supported by Astra DB at the moment. Returns: List of (Document, score, id), the most similar to the query vector. """ embedding_vector = await self.embedding.aembed_query(query) return await self.asimilarity_search_with_score_id_by_vector( embedding=embedding_vector, k=k, filter=filter, body_search=body_search, ) # id-unaware search facilities def similarity_search_with_score_by_vector( self, embedding: List[float], k: int = 4, filter: Optional[Dict[str, str]] = None, body_search: Optional[Union[str, List[str]]] = None, ) -> List[Tuple[Document, float]]: """Return docs most similar to embedding vector. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. filter: Filter on the metadata to apply. body_search: Document textual search terms to apply. Only supported by Astra DB at the moment. Returns: List of (Document, score), the most similar to the query vector. 
""" return [ (doc, score) for (doc, score, docId) in self.similarity_search_with_score_id_by_vector( embedding=embedding, k=k, filter=filter, body_search=body_search, ) ] async def asimilarity_search_with_score_by_vector( self, embedding: List[float], k: int = 4, filter: Optional[Dict[str, str]] = None, body_search: Optional[Union[str, List[str]]] = None, ) -> List[Tuple[Document, float]]: """Return docs most similar to embedding vector. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. filter: Filter on the metadata to apply. body_search: Document textual search terms to apply. Only supported by Astra DB at the moment. Returns: List of (Document, score), the most similar to the query vector. """ return [ (doc, score) for ( doc, score, _, ) in await self.asimilarity_search_with_score_id_by_vector( embedding=embedding, k=k, filter=filter, body_search=body_search, ) ] def similarity_search( self, query: str, k: int = 4, filter: Optional[Dict[str, str]] = None, body_search: Optional[Union[str, List[str]]] = None, **kwargs: Any, ) -> List[Document]: """Return docs most similar to query. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. filter: Filter on the metadata to apply. body_search: Document textual search terms to apply. Only supported by Astra DB at the moment. Returns: List of Document, the most similar to the query vector. """ embedding_vector = self.embedding.embed_query(query) return self.similarity_search_by_vector( embedding_vector, k, filter=filter, body_search=body_search, ) async def asimilarity_search( self, query: str, k: int = 4, filter: Optional[Dict[str, str]] = None, body_search: Optional[Union[str, List[str]]] = None, **kwargs: Any, ) -> List[Document]: """Return docs most similar to query. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. filter: Filter on the metadata to apply. body_search: Document textual search terms to apply. Only supported by Astra DB at the moment. Returns: List of Document, the most similar to the query vector. """ embedding_vector = await self.embedding.aembed_query(query) return await self.asimilarity_search_by_vector( embedding_vector, k, filter=filter, body_search=body_search, ) def similarity_search_by_vector( self, embedding: List[float], k: int = 4, filter: Optional[Dict[str, str]] = None, body_search: Optional[Union[str, List[str]]] = None, **kwargs: Any, ) -> List[Document]: """Return docs most similar to embedding vector. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. filter: Filter on the metadata to apply. body_search: Document textual search terms to apply. Only supported by Astra DB at the moment. Returns: List of Document, the most similar to the query vector. """ return [ doc for doc, _ in self.similarity_search_with_score_by_vector( embedding, k, filter=filter, body_search=body_search, ) ] async def asimilarity_search_by_vector( self, embedding: List[float], k: int = 4, filter: Optional[Dict[str, str]] = None, body_search: Optional[Union[str, List[str]]] = None, **kwargs: Any, ) -> List[Document]: """Return docs most similar to embedding vector. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. filter: Filter on the metadata to apply. body_search: Document textual search terms to apply. Only supported by Astra DB at the moment. 
Returns: List of Document, the most similar to the query vector. """ return [ doc for doc, _ in await self.asimilarity_search_with_score_by_vector( embedding, k, filter=filter, body_search=body_search, ) ] def similarity_search_with_score( self, query: str, k: int = 4, filter: Optional[Dict[str, str]] = None, body_search: Optional[Union[str, List[str]]] = None, ) -> List[Tuple[Document, float]]: """Return docs most similar to query. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. filter: Filter on the metadata to apply. body_search: Document textual search terms to apply. Only supported by Astra DB at the moment. Returns: List of (Document, score), the most similar to the query vector. """ embedding_vector = self.embedding.embed_query(query) return self.similarity_search_with_score_by_vector( embedding_vector, k, filter=filter, body_search=body_search, ) async def asimilarity_search_with_score( self, query: str, k: int = 4, filter: Optional[Dict[str, str]] = None, body_search: Optional[Union[str, List[str]]] = None, ) -> List[Tuple[Document, float]]: """Return docs most similar to query. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. filter: Filter on the metadata to apply. body_search: Document textual search terms to apply. Only supported by Astra DB at the moment. Returns: List of (Document, score), the most similar to the query vector. """ embedding_vector = await self.embedding.aembed_query(query) return await self.asimilarity_search_with_score_by_vector( embedding_vector, k, filter=filter, body_search=body_search, ) @staticmethod def _mmr_search_to_documents( prefetch_hits: List[Dict[str, Any]], embedding: List[float], k: int, lambda_mult: float, ) -> List[Document]: # let the mmr utility pick the *indices* in the above array mmr_chosen_indices = maximal_marginal_relevance( np.array(embedding, dtype=np.float32), [pf_hit["vector"] for pf_hit in prefetch_hits], k=k, lambda_mult=lambda_mult, ) mmr_hits = [ pf_hit for pf_index, pf_hit in enumerate(prefetch_hits) if pf_index in mmr_chosen_indices ] return [Cassandra._row_to_document(row=hit) for hit in mmr_hits] def max_marginal_relevance_search_by_vector( self, embedding: List[float], k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, filter: Optional[Dict[str, str]] = None, body_search: Optional[Union[str, List[str]]] = None, **kwargs: Any, ) -> List[Document]: """Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. fetch_k: Number of Documents to fetch to pass to MMR algorithm. Defaults to 20. lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. filter: Filter on the metadata to apply. body_search: Document textual search terms to apply. Only supported by Astra DB at the moment. Returns: List of Documents selected by maximal marginal relevance. 
""" _kwargs: Dict[str, Any] = {} if filter is not None: _kwargs["metadata"] = filter if body_search is not None: _kwargs["body_search"] = body_search prefetch_hits = list( self.table.metric_ann_search( vector=embedding, n=fetch_k, metric="cos", **_kwargs, ) ) return self._mmr_search_to_documents(prefetch_hits, embedding, k, lambda_mult) async def amax_marginal_relevance_search_by_vector( self, embedding: List[float], k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, filter: Optional[Dict[str, str]] = None, body_search: Optional[Union[str, List[str]]] = None, **kwargs: Any, ) -> List[Document]: """Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. fetch_k: Number of Documents to fetch to pass to MMR algorithm. Defaults to 20. lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. filter: Filter on the metadata to apply. body_search: Document textual search terms to apply. Only supported by Astra DB at the moment. Returns: List of Documents selected by maximal marginal relevance. """ _kwargs: Dict[str, Any] = {} if filter is not None: _kwargs["metadata"] = filter if body_search is not None: _kwargs["body_search"] = body_search prefetch_hits = list( await self.table.ametric_ann_search( vector=embedding, n=fetch_k, metric="cos", **_kwargs, ) ) return self._mmr_search_to_documents(prefetch_hits, embedding, k, lambda_mult) def max_marginal_relevance_search( self, query: str, k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, filter: Optional[Dict[str, str]] = None, body_search: Optional[Union[str, List[str]]] = None, **kwargs: Any, ) -> List[Document]: """Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. fetch_k: Number of Documents to fetch to pass to MMR algorithm. Defaults to 20. lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. filter: Filter on the metadata to apply. body_search: Document textual search terms to apply. Only supported by Astra DB at the moment. Returns: List of Documents selected by maximal marginal relevance. """ embedding_vector = self.embedding.embed_query(query) return self.max_marginal_relevance_search_by_vector( embedding_vector, k, fetch_k, lambda_mult=lambda_mult, filter=filter, body_search=body_search, ) async def amax_marginal_relevance_search( self, query: str, k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, filter: Optional[Dict[str, str]] = None, body_search: Optional[Union[str, List[str]]] = None, **kwargs: Any, ) -> List[Document]: """Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: query: Text to look up documents similar to. k: Number of Documents to return. fetch_k: Number of Documents to fetch to pass to MMR algorithm. 
            lambda_mult: Number between 0 and 1 that determines the degree
                of diversity among the results with 0 corresponding
                to maximum diversity and 1 to minimum diversity.
                Defaults to 0.5.
            filter: Filter on the metadata to apply.
            body_search: Document textual search terms to apply.
                Only supported by Astra DB at the moment.

        Returns:
            List of Documents selected by maximal marginal relevance.
        """
        embedding_vector = await self.embedding.aembed_query(query)
        return await self.amax_marginal_relevance_search_by_vector(
            embedding_vector,
            k,
            fetch_k,
            lambda_mult=lambda_mult,
            filter=filter,
            body_search=body_search,
        )

    @staticmethod
    def _build_docs_from_texts(
        texts: List[str],
        metadatas: Optional[List[dict]] = None,
        ids: Optional[List[str]] = None,
    ) -> List[Document]:
        docs: List[Document] = []
        for i, text in enumerate(texts):
            doc = Document(
                page_content=text,
            )
            if metadatas is not None:
                doc.metadata = metadatas[i]
            if ids is not None:
                doc.id = ids[i]
            docs.append(doc)
        return docs

    @classmethod
    def from_texts(
        cls: Type[CVST],
        texts: List[str],
        embedding: Embeddings,
        metadatas: Optional[List[dict]] = None,
        *,
        session: Optional[Session] = None,
        keyspace: Optional[str] = None,
        table_name: str = "",
        ids: Optional[List[str]] = None,
        ttl_seconds: Optional[int] = None,
        body_index_options: Optional[List[Tuple[str, Any]]] = None,
        metadata_indexing: Union[Tuple[str, Iterable[str]], str] = "all",
        **kwargs: Any,
    ) -> CVST:
        """Create a Cassandra vector store from raw texts.

        Args:
            texts: Texts to add to the vectorstore.
            embedding: Embedding function to use.
            metadatas: Optional list of metadatas associated with the texts.
            session: Cassandra driver session. If not provided, it is resolved from
                cassio.
            keyspace: Cassandra keyspace. If not provided, it is resolved from
                cassio.
            table_name: Cassandra table (required).
            ids: Optional list of IDs associated with the texts.
            ttl_seconds: Optional time-to-live for the added texts.
            body_index_options: Optional options used to create the body index.
                Eg. body_index_options = [cassio.table.cql.STANDARD_ANALYZER]
            metadata_indexing: Optional specification of a metadata indexing policy,
                i.e. to fine-tune which of the metadata fields are indexed.
                It can be a string ("all" or "none"), or a 2-tuple. The following
                means that all fields except 'f1', 'f2' ... are NOT indexed:
                    metadata_indexing=("allowlist", ["f1", "f2", ...])
                The following means all fields EXCEPT 'g1', 'g2', ... are indexed:
                    metadata_indexing=("denylist", ["g1", "g2", ...])
                The default is to index every metadata field.
                Note: if you plan to have massive unique text metadata entries,
                consider not indexing them for performance
                (and to overcome max-length limitations).

        Returns:
            a Cassandra vector store.
        """
        docs = cls._build_docs_from_texts(
            texts=texts,
            metadatas=metadatas,
            ids=ids,
        )

        return cls.from_documents(
            documents=docs,
            embedding=embedding,
            session=session,
            keyspace=keyspace,
            table_name=table_name,
            ttl_seconds=ttl_seconds,
            body_index_options=body_index_options,
            metadata_indexing=metadata_indexing,
            **kwargs,
        )

    @classmethod
    async def afrom_texts(
        cls: Type[CVST],
        texts: List[str],
        embedding: Embeddings,
        metadatas: Optional[List[dict]] = None,
        *,
        session: Optional[Session] = None,
        keyspace: Optional[str] = None,
        table_name: str = "",
        ids: Optional[List[str]] = None,
        ttl_seconds: Optional[int] = None,
        body_index_options: Optional[List[Tuple[str, Any]]] = None,
        metadata_indexing: Union[Tuple[str, Iterable[str]], str] = "all",
        **kwargs: Any,
    ) -> CVST:
        """Create a Cassandra vector store from raw texts. 
        Args:
            texts: Texts to add to the vectorstore.
            embedding: Embedding function to use.
            metadatas: Optional list of metadatas associated with the texts.
            session: Cassandra driver session. If not provided, it is resolved from
                cassio.
            keyspace: Cassandra keyspace. If not provided, it is resolved from
                cassio.
            table_name: Cassandra table (required).
            ids: Optional list of IDs associated with the texts.
            ttl_seconds: Optional time-to-live for the added texts.
            body_index_options: Optional options used to create the body index.
                Eg. body_index_options = [cassio.table.cql.STANDARD_ANALYZER]
            metadata_indexing: Optional specification of a metadata indexing policy,
                i.e. to fine-tune which of the metadata fields are indexed.
                It can be a string ("all" or "none"), or a 2-tuple. The following
                means that all fields except 'f1', 'f2' ... are NOT indexed:
                    metadata_indexing=("allowlist", ["f1", "f2", ...])
                The following means all fields EXCEPT 'g1', 'g2', ... are indexed:
                    metadata_indexing=("denylist", ["g1", "g2", ...])
                The default is to index every metadata field.
                Note: if you plan to have massive unique text metadata entries,
                consider not indexing them for performance
                (and to overcome max-length limitations).

        Returns:
            a Cassandra vector store.
        """
        docs = cls._build_docs_from_texts(
            texts=texts,
            metadatas=metadatas,
            ids=ids,
        )

        return await cls.afrom_documents(
            documents=docs,
            embedding=embedding,
            session=session,
            keyspace=keyspace,
            table_name=table_name,
            ttl_seconds=ttl_seconds,
            body_index_options=body_index_options,
            metadata_indexing=metadata_indexing,
            **kwargs,
        )

    @staticmethod
    def _add_ids_to_docs(
        docs: List[Document],
        ids: Optional[List[str]] = None,
    ) -> List[Document]:
        if ids is not None:
            for doc, doc_id in zip(docs, ids):
                doc.id = doc_id
        return docs

    @classmethod
    def from_documents(
        cls: Type[CVST],
        documents: List[Document],
        embedding: Embeddings,
        *,
        session: Optional[Session] = None,
        keyspace: Optional[str] = None,
        table_name: str = "",
        ids: Optional[List[str]] = None,
        ttl_seconds: Optional[int] = None,
        body_index_options: Optional[List[Tuple[str, Any]]] = None,
        metadata_indexing: Union[Tuple[str, Iterable[str]], str] = "all",
        **kwargs: Any,
    ) -> CVST:
        """Create a Cassandra vector store from a document list.

        Args:
            documents: Documents to add to the vectorstore.
            embedding: Embedding function to use.
            session: Cassandra driver session. If not provided, it is resolved from
                cassio.
            keyspace: Cassandra keyspace. If not provided, it is resolved from
                cassio.
            table_name: Cassandra table (required).
            ids: Optional list of IDs associated with the documents.
            ttl_seconds: Optional time-to-live for the added documents.
            body_index_options: Optional options used to create the body index.
                Eg. body_index_options = [cassio.table.cql.STANDARD_ANALYZER]
            metadata_indexing: Optional specification of a metadata indexing policy,
                i.e. to fine-tune which of the metadata fields are indexed.
                It can be a string ("all" or "none"), or a 2-tuple. The following
                means that all fields except 'f1', 'f2' ... are NOT indexed:
                    metadata_indexing=("allowlist", ["f1", "f2", ...])
                The following means all fields EXCEPT 'g1', 'g2', ... are indexed:
                    metadata_indexing=("denylist", ["g1", "g2", ...])
                The default is to index every metadata field.
                Note: if you plan to have massive unique text metadata entries,
                consider not indexing them for performance
                (and to overcome max-length limitations).

        Returns:
            a Cassandra vector store.
        """
        if ids is not None:
            warnings.warn(
                (
                    "Parameter `ids` to Cassandra's `from_documents` "
                    "method is deprecated. 
Please set the supplied documents' "
                    "`.id` attribute instead. The id attribute of Document "
                    "is ignored as long as the `ids` parameter is passed."
                ),
                DeprecationWarning,
                stacklevel=2,
            )

        store = cls(
            embedding=embedding,
            session=session,
            keyspace=keyspace,
            table_name=table_name,
            ttl_seconds=ttl_seconds,
            body_index_options=body_index_options,
            metadata_indexing=metadata_indexing,
            **kwargs,
        )
        store.add_documents(documents=cls._add_ids_to_docs(docs=documents, ids=ids))
        return store

    @classmethod
    async def afrom_documents(
        cls: Type[CVST],
        documents: List[Document],
        embedding: Embeddings,
        *,
        session: Optional[Session] = None,
        keyspace: Optional[str] = None,
        table_name: str = "",
        ids: Optional[List[str]] = None,
        ttl_seconds: Optional[int] = None,
        body_index_options: Optional[List[Tuple[str, Any]]] = None,
        metadata_indexing: Union[Tuple[str, Iterable[str]], str] = "all",
        **kwargs: Any,
    ) -> CVST:
        """Create a Cassandra vector store from a document list.

        Args:
            documents: Documents to add to the vectorstore.
            embedding: Embedding function to use.
            session: Cassandra driver session. If not provided, it is resolved from
                cassio.
            keyspace: Cassandra keyspace. If not provided, it is resolved from
                cassio.
            table_name: Cassandra table (required).
            ids: Optional list of IDs associated with the documents.
            ttl_seconds: Optional time-to-live for the added documents.
            body_index_options: Optional options used to create the body index.
                Eg. body_index_options = [cassio.table.cql.STANDARD_ANALYZER]
            metadata_indexing: Optional specification of a metadata indexing policy,
                i.e. to fine-tune which of the metadata fields are indexed.
                It can be a string ("all" or "none"), or a 2-tuple. The following
                means that all fields except 'f1', 'f2' ... are NOT indexed:
                    metadata_indexing=("allowlist", ["f1", "f2", ...])
                The following means all fields EXCEPT 'g1', 'g2', ... are indexed:
                    metadata_indexing=("denylist", ["g1", "g2", ...])
                The default is to index every metadata field.
                Note: if you plan to have massive unique text metadata entries,
                consider not indexing them for performance
                (and to overcome max-length limitations).

        Returns:
            a Cassandra vector store.
        """
        if ids is not None:
            warnings.warn(
                (
                    "Parameter `ids` to Cassandra's `afrom_documents` "
                    "method is deprecated. Please set the supplied documents' "
                    "`.id` attribute instead. The id attribute of Document "
                    "is ignored as long as the `ids` parameter is passed."
                ),
                DeprecationWarning,
                stacklevel=2,
            )

        store = cls(
            embedding=embedding,
            session=session,
            keyspace=keyspace,
            table_name=table_name,
            ttl_seconds=ttl_seconds,
            setup_mode=SetupMode.ASYNC,
            body_index_options=body_index_options,
            metadata_indexing=metadata_indexing,
            **kwargs,
        )
        await store.aadd_documents(
            documents=cls._add_ids_to_docs(docs=documents, ids=ids)
        )
        return store

    def as_retriever(
        self,
        search_type: str = "similarity",
        search_kwargs: Optional[Dict[str, Any]] = None,
        tags: Optional[List[str]] = None,
        metadata: Optional[Dict[str, Any]] = None,
        **kwargs: Any,
    ) -> VectorStoreRetriever:
        """Return VectorStoreRetriever initialized from this VectorStore.

        Args:
            search_type: Defines the type of search that
                the Retriever should perform.
                Can be "similarity" (default), "mmr", or
                "similarity_score_threshold".
            search_kwargs: Keyword arguments to pass to the search function. 
                Can include things like:
                    k: Amount of documents to return (Default: 4)
                    score_threshold: Minimum relevance threshold
                        for similarity_score_threshold
                    fetch_k: Amount of documents to pass to MMR algorithm
                        (Default: 20)
                    lambda_mult: Diversity of results returned by MMR;
                        1 for minimum diversity and 0 for maximum. (Default: 0.5)
                    filter: Filter by document metadata
            tags: List of tags associated with the retriever.
            metadata: Metadata associated with the retriever.
            kwargs: Other arguments passed to the VectorStoreRetriever init.

        Returns:
            Retriever for VectorStore.

        Examples:

        .. code-block:: python

            # Retrieve more documents with higher diversity
            # Useful if your dataset has many similar documents
            docsearch.as_retriever(
                search_type="mmr",
                search_kwargs={'k': 6, 'lambda_mult': 0.25}
            )

            # Fetch more documents for the MMR algorithm to consider
            # But only return the top 5
            docsearch.as_retriever(
                search_type="mmr",
                search_kwargs={'k': 5, 'fetch_k': 50}
            )

            # Only retrieve documents that have a relevance score
            # Above a certain threshold
            docsearch.as_retriever(
                search_type="similarity_score_threshold",
                search_kwargs={'score_threshold': 0.8}
            )

            # Only get the single most similar document from the dataset
            docsearch.as_retriever(search_kwargs={'k': 1})

            # Use a filter to only retrieve documents from a specific paper
            docsearch.as_retriever(
                search_kwargs={'filter': {'paper_title':'GPT-4 Technical Report'}}
            )
        """
        # Parenthesize (tags or []) so the store's own retriever tags are
        # appended even when the caller supplies `tags`.
        _tags = (tags or []) + self._get_retriever_tags()
        return VectorStoreRetriever(
            vectorstore=self,
            search_type=search_type,
            search_kwargs=search_kwargs or {},
            tags=_tags,
            metadata=metadata,
            **kwargs,
        )
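

# ---------------------------------------------------------------------------
# Usage sketch (illustrative only, mirroring the class docstring above):
# wiring the store into a retriever. The session/keyspace values are
# placeholders to be supplied by the caller, and ``DeterministicFakeEmbedding``
# is assumed to be available from ``langchain_core.embeddings`` as a stand-in
# for a real embedding model.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from langchain_core.embeddings import DeterministicFakeEmbedding

    session = ...  # create your Cassandra driver session object
    keyspace = "my_keyspace"  # the keyspace should exist already

    store = Cassandra.from_texts(
        texts=["The moon is far away.", "Cassandra can store vectors."],
        embedding=DeterministicFakeEmbedding(size=16),
        session=session,
        keyspace=keyspace,
        table_name="demo_vector_store",
    )
    # MMR trades off similarity to the query against diversity of results.
    retriever = store.as_retriever(
        search_type="mmr",
        search_kwargs={"k": 2, "fetch_k": 10, "lambda_mult": 0.5},
    )
    for doc in retriever.invoke("How far away is the moon?"):
        print(doc.page_content)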
0
lc_public_repos/langchain/libs/community/langchain_community
lc_public_repos/langchain/libs/community/langchain_community/vectorstores/hanavector.py
"""SAP HANA Cloud Vector Engine""" from __future__ import annotations import importlib.util import json import re from typing import ( TYPE_CHECKING, Any, Callable, Dict, Iterable, List, Optional, Pattern, Tuple, Type, ) import numpy as np from langchain_core.documents import Document from langchain_core.embeddings import Embeddings from langchain_core.runnables.config import run_in_executor from langchain_core.vectorstores import VectorStore from langchain_community.vectorstores.utils import ( DistanceStrategy, maximal_marginal_relevance, ) if TYPE_CHECKING: from hdbcli import dbapi HANA_DISTANCE_FUNCTION: dict = { DistanceStrategy.COSINE: ("COSINE_SIMILARITY", "DESC"), DistanceStrategy.EUCLIDEAN_DISTANCE: ("L2DISTANCE", "ASC"), } COMPARISONS_TO_SQL = { "$eq": "=", "$ne": "<>", "$lt": "<", "$lte": "<=", "$gt": ">", "$gte": ">=", } IN_OPERATORS_TO_SQL = { "$in": "IN", "$nin": "NOT IN", } BETWEEN_OPERATOR = "$between" LIKE_OPERATOR = "$like" LOGICAL_OPERATORS_TO_SQL = {"$and": "AND", "$or": "OR"} default_distance_strategy = DistanceStrategy.COSINE default_table_name: str = "EMBEDDINGS" default_content_column: str = "VEC_TEXT" default_metadata_column: str = "VEC_META" default_vector_column: str = "VEC_VECTOR" default_vector_column_length: int = -1 # -1 means dynamic length class HanaDB(VectorStore): """SAP HANA Cloud Vector Engine The prerequisite for using this class is the installation of the ``hdbcli`` Python package. The HanaDB vectorstore can be created by providing an embedding function and an existing database connection. Optionally, the names of the table and the columns to use. """ def __init__( self, connection: dbapi.Connection, embedding: Embeddings, distance_strategy: DistanceStrategy = default_distance_strategy, table_name: str = default_table_name, content_column: str = default_content_column, metadata_column: str = default_metadata_column, vector_column: str = default_vector_column, vector_column_length: int = default_vector_column_length, *, specific_metadata_columns: Optional[List[str]] = None, ): # Check if the hdbcli package is installed if importlib.util.find_spec("hdbcli") is None: raise ImportError( "Could not import hdbcli python package. " "Please install it with `pip install hdbcli`." 
            )
        valid_distance = False
        for key in HANA_DISTANCE_FUNCTION.keys():
            if key is distance_strategy:
                valid_distance = True
        if not valid_distance:
            raise ValueError(
                "Unsupported distance_strategy: {}".format(distance_strategy)
            )

        self.connection = connection
        self.embedding = embedding
        self.distance_strategy = distance_strategy
        self.table_name = HanaDB._sanitize_name(table_name)
        self.content_column = HanaDB._sanitize_name(content_column)
        self.metadata_column = HanaDB._sanitize_name(metadata_column)
        self.vector_column = HanaDB._sanitize_name(vector_column)
        self.vector_column_length = HanaDB._sanitize_int(vector_column_length)
        self.specific_metadata_columns = HanaDB._sanitize_specific_metadata_columns(
            specific_metadata_columns or []
        )

        # Check if the table exists, and eventually create it
        if not self._table_exists(self.table_name):
            sql_str = (
                f'CREATE TABLE "{self.table_name}"('
                f'"{self.content_column}" NCLOB, '
                f'"{self.metadata_column}" NCLOB, '
                f'"{self.vector_column}" REAL_VECTOR '
            )
            if self.vector_column_length in [-1, 0]:
                sql_str += ");"
            else:
                sql_str += f"({self.vector_column_length}));"

            try:
                cur = self.connection.cursor()
                cur.execute(sql_str)
            finally:
                cur.close()

        # Check if the needed columns exist and have the correct type
        self._check_column(self.table_name, self.content_column, ["NCLOB", "NVARCHAR"])
        self._check_column(
            self.table_name, self.metadata_column, ["NCLOB", "NVARCHAR"]
        )
        self._check_column(
            self.table_name,
            self.vector_column,
            ["REAL_VECTOR"],
            self.vector_column_length,
        )
        for column_name in self.specific_metadata_columns:
            self._check_column(self.table_name, column_name)

    def _table_exists(self, table_name) -> bool:  # type: ignore[no-untyped-def]
        sql_str = (
            "SELECT COUNT(*) FROM SYS.TABLES WHERE SCHEMA_NAME = CURRENT_SCHEMA"
            " AND TABLE_NAME = ?"
        )
        try:
            cur = self.connection.cursor()
            # Bind as a one-element tuple; `(table_name)` without the comma
            # would pass a bare string rather than a parameter sequence.
            cur.execute(sql_str, (table_name,))
            if cur.has_result_set():
                rows = cur.fetchall()
                if rows[0][0] == 1:
                    return True
        finally:
            cur.close()
        return False

    def _check_column(  # type: ignore[no-untyped-def]
        self, table_name, column_name, column_type=None, column_length=None
    ):
        sql_str = (
            "SELECT DATA_TYPE_NAME, LENGTH FROM SYS.TABLE_COLUMNS WHERE "
            "SCHEMA_NAME = CURRENT_SCHEMA "
            "AND TABLE_NAME = ? AND COLUMN_NAME = ?"
        )
        try:
            cur = self.connection.cursor()
            cur.execute(sql_str, (table_name, column_name))
            if cur.has_result_set():
                rows = cur.fetchall()
                if len(rows) == 0:
                    raise AttributeError(f"Column {column_name} does not exist")
                # Check data type
                if column_type:
                    if rows[0][0] not in column_type:
                        raise AttributeError(
                            f"Column {column_name} has the wrong type: {rows[0][0]}"
                        )
                # Check length, if parameter was provided
                # Length can either be -1 (QRC01+02-24) or 0 (QRC03-24 onwards)
                # to indicate no length constraint being present. 
if column_length is not None and column_length > 0: if rows[0][1] != column_length: raise AttributeError( f"Column {column_name} has the wrong length: {rows[0][1]} " f"expected: {column_length}" ) else: raise AttributeError(f"Column {column_name} does not exist") finally: cur.close() @property def embeddings(self) -> Embeddings: return self.embedding @staticmethod def _sanitize_name(input_str: str) -> str: # type: ignore[misc] # Remove characters that are not alphanumeric or underscores return re.sub(r"[^a-zA-Z0-9_]", "", input_str) @staticmethod def _sanitize_int(input_int: any) -> int: # type: ignore[valid-type] value = int(str(input_int)) if value < -1: raise ValueError(f"Value ({value}) must not be smaller than -1") return int(str(input_int)) @staticmethod def _sanitize_list_float(embedding: List[float]) -> List[float]: for value in embedding: if not isinstance(value, float): raise ValueError(f"Value ({value}) does not have type float") return embedding # Compile pattern only once, for better performance _compiled_pattern: Pattern = re.compile("^[_a-zA-Z][_a-zA-Z0-9]*$") @staticmethod def _sanitize_metadata_keys(metadata: dict) -> dict: for key in metadata.keys(): if not HanaDB._compiled_pattern.match(key): raise ValueError(f"Invalid metadata key {key}") return metadata @staticmethod def _sanitize_specific_metadata_columns( specific_metadata_columns: List[str], ) -> List[str]: metadata_columns = [] for c in specific_metadata_columns: sanitized_name = HanaDB._sanitize_name(c) metadata_columns.append(sanitized_name) return metadata_columns def _split_off_special_metadata(self, metadata: dict) -> Tuple[dict, list]: # Use provided values by default or fallback special_metadata = [] if not metadata: return {}, [] for column_name in self.specific_metadata_columns: special_metadata.append(metadata.get(column_name, None)) return metadata, special_metadata def create_hnsw_index( self, m: Optional[int] = None, # Optional M parameter ef_construction: Optional[int] = None, # Optional efConstruction parameter ef_search: Optional[int] = None, # Optional efSearch parameter index_name: Optional[str] = None, # Optional custom index name ) -> None: """ Creates an HNSW vector index on a specified table and vector column with optional build and search configurations. If no configurations are provided, default parameters from the database are used. If provided values exceed the valid ranges, an error will be raised. The index is always created in ONLINE mode. Args: m: (Optional) Maximum number of neighbors per graph node (Valid Range: [4, 1000]) ef_construction: (Optional) Maximal candidates to consider when building the graph (Valid Range: [1, 100000]) ef_search: (Optional) Minimum candidates for top-k-nearest neighbor queries (Valid Range: [1, 100000]) index_name: (Optional) Custom index name. 
Defaults to <table_name>_<distance_strategy>_idx """ # Set default index name if not provided distance_func_name = HANA_DISTANCE_FUNCTION[self.distance_strategy][0] default_index_name = f"{self.table_name}_{distance_func_name}_idx" # Use provided index_name or default index_name = ( HanaDB._sanitize_name(index_name) if index_name else default_index_name ) # Initialize build_config and search_config as empty dictionaries build_config = {} search_config = {} # Validate and add m parameter to build_config if provided if m is not None: m = HanaDB._sanitize_int(m) if not (4 <= m <= 1000): raise ValueError("M must be in the range [4, 1000]") build_config["M"] = m # Validate and add ef_construction to build_config if provided if ef_construction is not None: ef_construction = HanaDB._sanitize_int(ef_construction) if not (1 <= ef_construction <= 100000): raise ValueError("efConstruction must be in the range [1, 100000]") build_config["efConstruction"] = ef_construction # Validate and add ef_search to search_config if provided if ef_search is not None: ef_search = HanaDB._sanitize_int(ef_search) if not (1 <= ef_search <= 100000): raise ValueError("efSearch must be in the range [1, 100000]") search_config["efSearch"] = ef_search # Convert build_config and search_config to JSON strings if they contain values build_config_str = json.dumps(build_config) if build_config else "" search_config_str = json.dumps(search_config) if search_config else "" # Create the index SQL string with the ONLINE keyword sql_str = ( f'CREATE HNSW VECTOR INDEX {index_name} ON "{self.table_name}" ' f'("{self.vector_column}") ' f"SIMILARITY FUNCTION {distance_func_name} " ) # Append build_config to the SQL string if provided if build_config_str: sql_str += f"BUILD CONFIGURATION '{build_config_str}' " # Append search_config to the SQL string if provided if search_config_str: sql_str += f"SEARCH CONFIGURATION '{search_config_str}' " # Always add the ONLINE option sql_str += "ONLINE " cur = self.connection.cursor() try: cur.execute(sql_str) finally: cur.close() def add_texts( # type: ignore[override] self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, embeddings: Optional[List[List[float]]] = None, **kwargs: Any, ) -> List[str]: """Add more texts to the vectorstore. Args: texts (Iterable[str]): Iterable of strings/text to add to the vectorstore. metadatas (Optional[List[dict]], optional): Optional list of metadatas. Defaults to None. embeddings (Optional[List[List[float]]], optional): Optional pre-generated embeddings. Defaults to None. 
Returns: List[str]: empty list """ # Create all embeddings of the texts beforehand to improve performance if embeddings is None: embeddings = self.embedding.embed_documents(list(texts)) # Create sql parameters array sql_params = [] for i, text in enumerate(texts): metadata = metadatas[i] if metadatas else {} metadata, extracted_special_metadata = self._split_off_special_metadata( metadata ) embedding = ( embeddings[i] if embeddings else self.embedding.embed_documents([text])[0] ) sql_params.append( ( text, json.dumps(HanaDB._sanitize_metadata_keys(metadata)), f"[{','.join(map(str, embedding))}]", *extracted_special_metadata, ) ) # Insert data into the table cur = self.connection.cursor() try: specific_metadata_columns_string = '", "'.join( self.specific_metadata_columns ) if specific_metadata_columns_string: specific_metadata_columns_string = ( ', "' + specific_metadata_columns_string + '"' ) sql_str = ( f'INSERT INTO "{self.table_name}" ("{self.content_column}", ' f'"{self.metadata_column}", ' f'"{self.vector_column}"{specific_metadata_columns_string}) ' f"VALUES (?, ?, TO_REAL_VECTOR (?)" f"{', ?' * len(self.specific_metadata_columns)});" ) cur.executemany(sql_str, sql_params) finally: cur.close() return [] @classmethod def from_texts( # type: ignore[no-untyped-def, override] cls: Type[HanaDB], texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, connection: dbapi.Connection = None, distance_strategy: DistanceStrategy = default_distance_strategy, table_name: str = default_table_name, content_column: str = default_content_column, metadata_column: str = default_metadata_column, vector_column: str = default_vector_column, vector_column_length: int = default_vector_column_length, *, specific_metadata_columns: Optional[List[str]] = None, ): """Create a HanaDB instance from raw documents. This is a user-friendly interface that: 1. Embeds documents. 2. Creates a table if it does not yet exist. 3. Adds the documents to the table. This is intended to be a quick way to get started. """ instance = cls( connection=connection, embedding=embedding, distance_strategy=distance_strategy, table_name=table_name, content_column=content_column, metadata_column=metadata_column, vector_column=vector_column, vector_column_length=vector_column_length, # -1 means dynamic length specific_metadata_columns=specific_metadata_columns, ) instance.add_texts(texts, metadatas) return instance def similarity_search( # type: ignore[override] self, query: str, k: int = 4, filter: Optional[dict] = None ) -> List[Document]: """Return docs most similar to query. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. filter: A dictionary of metadata fields and values to filter by. Defaults to None. Returns: List of Documents most similar to the query """ docs_and_scores = self.similarity_search_with_score( query=query, k=k, filter=filter ) return [doc for doc, _ in docs_and_scores] def similarity_search_with_score( self, query: str, k: int = 4, filter: Optional[dict] = None ) -> List[Tuple[Document, float]]: """Return documents and score values most similar to query. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. filter: A dictionary of metadata fields and values to filter by. Defaults to None. 
        Returns:
            List of tuples (containing a Document and a score) that are
            most similar to the query
        """
        embedding = self.embedding.embed_query(query)
        return self.similarity_search_with_score_by_vector(
            embedding=embedding, k=k, filter=filter
        )

    def similarity_search_with_score_and_vector_by_vector(
        self, embedding: List[float], k: int = 4, filter: Optional[dict] = None
    ) -> List[Tuple[Document, float, List[float]]]:
        """Return docs most similar to the given embedding.

        Args:
            embedding: Embedding to look up documents similar to.
            k: Number of Documents to return. Defaults to 4.
            filter: A dictionary of metadata fields and values to filter by.
                Defaults to None.

        Returns:
            List of Documents most similar to the query and
            score and the document's embedding vector for each
        """
        result = []
        k = HanaDB._sanitize_int(k)
        embedding = HanaDB._sanitize_list_float(embedding)
        distance_func_name = HANA_DISTANCE_FUNCTION[self.distance_strategy][0]
        embedding_as_str = "[" + ",".join(map(str, embedding)) + "]"
        sql_str = (
            f"SELECT TOP {k}"
            f' "{self.content_column}", '  # row[0]
            f' "{self.metadata_column}", '  # row[1]
            f' TO_NVARCHAR("{self.vector_column}"), '  # row[2]
            f' {distance_func_name}("{self.vector_column}", TO_REAL_VECTOR (?)) AS CS '
            f'FROM "{self.table_name}"'
        )
        order_str = f" order by CS {HANA_DISTANCE_FUNCTION[self.distance_strategy][1]}"
        where_str, query_tuple = self._create_where_by_filter(filter)
        query_tuple = (embedding_as_str,) + tuple(query_tuple)
        sql_str = sql_str + where_str
        sql_str = sql_str + order_str
        try:
            cur = self.connection.cursor()
            cur.execute(sql_str, query_tuple)
            if cur.has_result_set():
                rows = cur.fetchall()
                for row in rows:
                    js = json.loads(row[1])
                    doc = Document(page_content=row[0], metadata=js)
                    result_vector = HanaDB._parse_float_array_from_string(row[2])
                    result.append((doc, row[3], result_vector))
        finally:
            cur.close()
        return result

    def similarity_search_with_score_by_vector(
        self, embedding: List[float], k: int = 4, filter: Optional[dict] = None
    ) -> List[Tuple[Document, float]]:
        """Return docs most similar to the given embedding.

        Args:
            embedding: Embedding to look up documents similar to.
            k: Number of Documents to return. Defaults to 4.
            filter: A dictionary of metadata fields and values to filter by.
                Defaults to None.

        Returns:
            List of Documents most similar to the query and score for each
        """
        whole_result = self.similarity_search_with_score_and_vector_by_vector(
            embedding=embedding, k=k, filter=filter
        )
        return [(result_item[0], result_item[1]) for result_item in whole_result]

    def similarity_search_by_vector(  # type: ignore[override]
        self, embedding: List[float], k: int = 4, filter: Optional[dict] = None
    ) -> List[Document]:
        """Return docs most similar to embedding vector.

        Args:
            embedding: Embedding to look up documents similar to.
            k: Number of Documents to return. Defaults to 4.
            filter: A dictionary of metadata fields and values to filter by.
                Defaults to None.

        Returns:
            List of Documents most similar to the query vector. 
""" docs_and_scores = self.similarity_search_with_score_by_vector( embedding=embedding, k=k, filter=filter ) return [doc for doc, _ in docs_and_scores] def _create_where_by_filter(self, filter): # type: ignore[no-untyped-def] query_tuple = [] where_str = "" if filter: where_str, query_tuple = self._process_filter_object(filter) where_str = " WHERE " + where_str return where_str, query_tuple def _process_filter_object(self, filter): # type: ignore[no-untyped-def] query_tuple = [] where_str = "" if filter: for i, key in enumerate(filter.keys()): filter_value = filter[key] if i != 0: where_str += " AND " # Handling of 'special' boolean operators "$and", "$or" if key in LOGICAL_OPERATORS_TO_SQL: logical_operator = LOGICAL_OPERATORS_TO_SQL[key] logical_operands = filter_value for j, logical_operand in enumerate(logical_operands): if j != 0: where_str += f" {logical_operator} " ( where_str_logical, query_tuple_logical, ) = self._process_filter_object(logical_operand) where_str += "(" + where_str_logical + ")" query_tuple += query_tuple_logical continue operator = "=" sql_param = "?" if isinstance(filter_value, bool): query_tuple.append("true" if filter_value else "false") elif isinstance(filter_value, int) or isinstance(filter_value, str): query_tuple.append(filter_value) elif isinstance(filter_value, Dict): # Handling of 'special' operators starting with "$" special_op = next(iter(filter_value)) special_val = filter_value[special_op] # "$eq", "$ne", "$lt", "$lte", "$gt", "$gte" if special_op in COMPARISONS_TO_SQL: operator = COMPARISONS_TO_SQL[special_op] if isinstance(special_val, bool): query_tuple.append("true" if special_val else "false") elif isinstance(special_val, float): sql_param = "CAST(? as float)" query_tuple.append(special_val) elif ( isinstance(special_val, dict) and "type" in special_val and special_val["type"] == "date" ): # Date type sql_param = "CAST(? as DATE)" query_tuple.append(special_val["date"]) else: query_tuple.append(special_val) # "$between" elif special_op == BETWEEN_OPERATOR: between_from = special_val[0] between_to = special_val[1] operator = "BETWEEN" sql_param = "? AND ?" query_tuple.append(between_from) query_tuple.append(between_to) # "$like" elif special_op == LIKE_OPERATOR: operator = "LIKE" query_tuple.append(special_val) # "$in", "$nin" elif special_op in IN_OPERATORS_TO_SQL: operator = IN_OPERATORS_TO_SQL[special_op] if isinstance(special_val, list): for i, list_entry in enumerate(special_val): if i == 0: sql_param = "(" sql_param = sql_param + "?" if i == (len(special_val) - 1): sql_param = sql_param + ")" else: sql_param = sql_param + "," query_tuple.append(list_entry) else: raise ValueError( f"Unsupported value for {operator}: {special_val}" ) else: raise ValueError(f"Unsupported operator: {special_op}") else: raise ValueError( f"Unsupported filter data-type: {type(filter_value)}" ) selector = ( f' "{key}"' if key in self.specific_metadata_columns else f"JSON_VALUE({self.metadata_column}, '$.{key}')" ) where_str += f"{selector} " f"{operator} {sql_param}" return where_str, query_tuple def delete( # type: ignore[override] self, ids: Optional[List[str]] = None, filter: Optional[dict] = None ) -> Optional[bool]: """Delete entries by filter with metadata values Args: ids: Deletion with ids is not supported! A ValueError will be raised. filter: A dictionary of metadata fields and values to filter by. An empty filter ({}) will delete all entries in the table. Returns: Optional[bool]: True, if deletion is technically successful. 
        Deleting zero entries because no rows match the filter still counts
        as a success.
        """
        if ids is not None:
            raise ValueError("Deletion via ids is not supported")

        if filter is None:
            raise ValueError("Parameter 'filter' is required when calling 'delete'")

        where_str, query_tuple = self._create_where_by_filter(filter)
        sql_str = f'DELETE FROM "{self.table_name}" {where_str}'

        # Create the cursor before entering the try block so that the
        # finally clause can always close it safely.
        cur = self.connection.cursor()
        try:
            cur.execute(sql_str, query_tuple)
        finally:
            cur.close()

        return True

    async def adelete(  # type: ignore[override]
        self, ids: Optional[List[str]] = None, filter: Optional[dict] = None
    ) -> Optional[bool]:
        """Asynchronously delete entries by filter with metadata values.

        Args:
            ids: Deletion with ids is not supported! A ValueError will be
                raised (see ``delete``).
            filter: A dictionary of metadata fields and values to filter by.
                An empty filter ({}) will delete all entries in the table.

        Returns:
            Optional[bool]: True, if deletion is technically successful.
        """
        return await run_in_executor(None, self.delete, ids=ids, filter=filter)

    def max_marginal_relevance_search(  # type: ignore[override]
        self,
        query: str,
        k: int = 4,
        fetch_k: int = 20,
        lambda_mult: float = 0.5,
        filter: Optional[dict] = None,
    ) -> List[Document]:
        """Return docs selected using the maximal marginal relevance.

        Maximal marginal relevance optimizes for similarity to query AND
        diversity among selected documents.

        Args:
            query: search query text.
            k: Number of Documents to return. Defaults to 4.
            fetch_k: Number of Documents to fetch to pass to MMR algorithm.
            lambda_mult: Number between 0 and 1 that determines the degree
                of diversity among the results with 0 corresponding
                to maximum diversity and 1 to minimum diversity.
                Defaults to 0.5.
            filter: Filter on metadata properties, e.g.
                {
                    "str_property": "foo",
                    "int_property": 123
                }

        Returns:
            List of Documents selected by maximal marginal relevance.
        """
        embedding = self.embedding.embed_query(query)
        return self.max_marginal_relevance_search_by_vector(
            embedding=embedding,
            k=k,
            fetch_k=fetch_k,
            lambda_mult=lambda_mult,
            filter=filter,
        )

    @staticmethod
    def _parse_float_array_from_string(array_as_string: str) -> List[float]:
        """Parse a vector string such as "[0.1,0.2]" into a list of floats."""
        array_wo_brackets = array_as_string[1:-1]
        return [float(x) for x in array_wo_brackets.split(",")]

    def max_marginal_relevance_search_by_vector(  # type: ignore[override]
        self,
        embedding: List[float],
        k: int = 4,
        fetch_k: int = 20,
        lambda_mult: float = 0.5,
        filter: Optional[dict] = None,
    ) -> List[Document]:
        """Return docs selected using the maximal marginal relevance.

        Args:
            embedding: Embedding to look up documents similar to.
            k: Number of Documents to return. Defaults to 4.
            fetch_k: Number of Documents to fetch to pass to MMR algorithm.
            lambda_mult: Number between 0 and 1 that determines the degree
                of diversity among the results. Defaults to 0.5.
            filter: A dictionary of metadata fields and values to filter by.
                Defaults to None.

        Returns:
            List of Documents selected by maximal marginal relevance.
        """
        whole_result = self.similarity_search_with_score_and_vector_by_vector(
            embedding=embedding, k=fetch_k, filter=filter
        )
        embeddings = [result_item[2] for result_item in whole_result]
        mmr_doc_indexes = maximal_marginal_relevance(
            np.array(embedding), embeddings, lambda_mult=lambda_mult, k=k
        )

        return [whole_result[i][0] for i in mmr_doc_indexes]

    async def amax_marginal_relevance_search_by_vector(  # type: ignore[override]
        self,
        embedding: List[float],
        k: int = 4,
        fetch_k: int = 20,
        lambda_mult: float = 0.5,
    ) -> List[Document]:
        """Return docs selected using the maximal marginal relevance."""
        return await run_in_executor(
            None,
            self.max_marginal_relevance_search_by_vector,
            embedding=embedding,
            k=k,
            fetch_k=fetch_k,
            lambda_mult=lambda_mult,
        )

    @staticmethod
    def _cosine_relevance_score_fn(distance: float) -> float:
        # For cosine, the value returned by the database is already a
        # similarity score, so it is used as-is.
        return distance

    def _select_relevance_score_fn(self) -> Callable[[float], float]:
        """
        The 'correct' relevance function may differ depending on a few
        things, including:
        - the distance / similarity metric used by the VectorStore
        - the scale of your embeddings (OpenAI's are unit normed. Many others are not!)
        - embedding dimensionality
        - etc.

        Vectorstores should define their own selection-based method of relevance.
""" if self.distance_strategy == DistanceStrategy.COSINE: return HanaDB._cosine_relevance_score_fn elif self.distance_strategy == DistanceStrategy.EUCLIDEAN_DISTANCE: return HanaDB._euclidean_relevance_score_fn else: raise ValueError( "Unsupported distance_strategy: {}".format(self.distance_strategy) )
0
lc_public_repos/langchain/libs/community/langchain_community
lc_public_repos/langchain/libs/community/langchain_community/vectorstores/databricks_vector_search.py
from __future__ import annotations

import json
import logging
import uuid
from typing import (
    TYPE_CHECKING,
    Any,
    Callable,
    Dict,
    Iterable,
    List,
    Optional,
    Tuple,
    Type,
)

import numpy as np
from langchain_core._api import deprecated, warn_deprecated
from langchain_core.documents import Document
from langchain_core.embeddings import Embeddings
from langchain_core.vectorstores import VST, VectorStore

from langchain_community.vectorstores.utils import maximal_marginal_relevance

if TYPE_CHECKING:
    from databricks.vector_search.client import VectorSearchIndex

logger = logging.getLogger(__name__)


@deprecated(
    since="0.3.3",
    removal="1.0",
    alternative_import="langchain_databricks.DatabricksVectorSearch",
)
class DatabricksVectorSearch(VectorStore):
    """`Databricks Vector Search` vector store.

    To use, you should have the ``databricks-vectorsearch`` python package installed.

    Example:
        .. code-block:: python

            from langchain_community.vectorstores import DatabricksVectorSearch
            from databricks.vector_search.client import VectorSearchClient

            vs_client = VectorSearchClient()
            vs_index = vs_client.get_index(
                endpoint_name="vs_endpoint",
                index_name="ml.llm.index"
            )
            vectorstore = DatabricksVectorSearch(vs_index)

    Args:
        index: A Databricks Vector Search index object.
        embedding: The embedding model.
            Required for direct-access index or delta-sync index
            with self-managed embeddings.
        text_column: The name of the text column to use for the embeddings.
            Required for direct-access index or delta-sync index
            with self-managed embeddings.
            Make sure the text column specified is in the index.
        columns: The list of column names to get when doing the search.
            Defaults to ``[primary_key, text_column]``.

    Delta-sync index with Databricks-managed embeddings manages the ingestion,
    deletion, and embedding for you.
    Manual ingestion/deletion of the documents/texts is not supported for
    delta-sync index.

    If you want to use a delta-sync index with self-managed embeddings, you need to
    provide the embedding model and text column name to use for the embeddings.

    Example:
        .. code-block:: python

            from langchain_community.vectorstores import DatabricksVectorSearch
            from databricks.vector_search.client import VectorSearchClient
            from langchain_community.embeddings.openai import OpenAIEmbeddings

            vs_client = VectorSearchClient()
            vs_index = vs_client.get_index(
                endpoint_name="vs_endpoint",
                index_name="ml.llm.index"
            )
            vectorstore = DatabricksVectorSearch(
                index=vs_index,
                embedding=OpenAIEmbeddings(),
                text_column="document_content"
            )

    If you want to manage document ingestion/deletion yourself, you can use a
    direct-access index.

    Example:
        .. code-block:: python

            from langchain_community.vectorstores import DatabricksVectorSearch
            from databricks.vector_search.client import VectorSearchClient
            from langchain_community.embeddings.openai import OpenAIEmbeddings

            vs_client = VectorSearchClient()
            vs_index = vs_client.get_index(
                endpoint_name="vs_endpoint",
                index_name="ml.llm.index"
            )
            vectorstore = DatabricksVectorSearch(
                index=vs_index,
                embedding=OpenAIEmbeddings(),
                text_column="document_content"
            )
            vectorstore.add_texts(
                texts=["text1", "text2"]
            )

    For more information on Databricks Vector Search, see the
    `Databricks Vector Search documentation
    <https://docs.databricks.com/en/generative-ai/vector-search.html>`_.
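
    Search results can be narrowed with a ``filter`` dictionary over metadata
    columns (a minimal sketch; the column name below is illustrative):

    Example:
        .. code-block:: python

            docs = vectorstore.similarity_search(
                "what is databricks?",
                k=2,
                filter={"source": "docs"},
            )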
""" def __init__( self, index: VectorSearchIndex, *, embedding: Optional[Embeddings] = None, text_column: Optional[str] = None, columns: Optional[List[str]] = None, ): try: from databricks.vector_search.client import VectorSearchIndex except ImportError as e: raise ImportError( "Could not import databricks-vectorsearch python package. " "Please install it with `pip install databricks-vectorsearch`." ) from e # index self.index = index if not isinstance(index, VectorSearchIndex): raise TypeError("index must be of type VectorSearchIndex.") # index_details index_details = self.index.describe() self.primary_key = index_details["primary_key"] self.index_type = index_details.get("index_type") self._delta_sync_index_spec = index_details.get("delta_sync_index_spec", dict()) self._direct_access_index_spec = index_details.get( "direct_access_index_spec", dict() ) # text_column if self._is_databricks_managed_embeddings(): index_source_column = self._embedding_source_column_name() # check if input text column matches the source column of the index if text_column is not None and text_column != index_source_column: raise ValueError( f"text_column '{text_column}' does not match with the " f"source column of the index: '{index_source_column}'." ) self.text_column = index_source_column else: self._require_arg(text_column, "text_column") self.text_column = text_column # columns self.columns = columns or [] # add primary key column and source column if not in columns if self.primary_key not in self.columns: self.columns.append(self.primary_key) if self.text_column and self.text_column not in self.columns: self.columns.append(self.text_column) # Validate specified columns are in the index if self._is_direct_access_index(): index_schema = self._index_schema() if index_schema: for col in self.columns: if col not in index_schema: raise ValueError( f"column '{col}' is not in the index's schema." ) # embedding model if not self._is_databricks_managed_embeddings(): # embedding model is required for direct-access index # or delta-sync index with self-managed embedding self._require_arg(embedding, "embedding") self._embedding = embedding # validate dimension matches index_embedding_dimension = self._embedding_vector_column_dimension() if index_embedding_dimension is not None: inferred_embedding_dimension = self._infer_embedding_dimension() if inferred_embedding_dimension != index_embedding_dimension: raise ValueError( f"embedding model's dimension '{inferred_embedding_dimension}' " f"does not match with the index's dimension " f"'{index_embedding_dimension}'." ) else: if embedding is not None: logger.warning( "embedding model is not used in delta-sync index with " "Databricks-managed embeddings." ) self._embedding = None @classmethod def from_texts( cls: Type[VST], texts: List[str], embedding: Embeddings, metadatas: Optional[List[Dict]] = None, **kwargs: Any, ) -> VST: raise NotImplementedError( "`from_texts` is not supported. " "Use `add_texts` to add to existing direct-access index." ) def add_texts( self, texts: Iterable[str], metadatas: Optional[List[Dict]] = None, ids: Optional[List[Any]] = None, **kwargs: Any, ) -> List[str]: """Add texts to the index. Only support direct-access index. Args: texts: List of texts to add. metadatas: List of metadata for each text. Defaults to None. ids: List of ids for each text. Defaults to None. If not provided, a random uuid will be generated for each text. Returns: List of ids from adding the texts into the index. 
""" self._op_require_direct_access_index("add_texts") assert self.embeddings is not None, "embedding model is required." # Wrap to list if input texts is a single string if isinstance(texts, str): texts = [texts] texts = list(texts) vectors = self.embeddings.embed_documents(texts) ids = ids or [str(uuid.uuid4()) for _ in texts] metadatas = metadatas or [{} for _ in texts] updates = [ { self.primary_key: id_, self.text_column: text, self._embedding_vector_column_name(): vector, **metadata, } for text, vector, id_, metadata in zip(texts, vectors, ids, metadatas) ] upsert_resp = self.index.upsert(updates) if upsert_resp.get("status") in ("PARTIAL_SUCCESS", "FAILURE"): failed_ids = upsert_resp.get("result", dict()).get( "failed_primary_keys", [] ) if upsert_resp.get("status") == "FAILURE": logger.error("Failed to add texts to the index.") else: logger.warning("Some texts failed to be added to the index.") return [id_ for id_ in ids if id_ not in failed_ids] return ids @property def embeddings(self) -> Optional[Embeddings]: """Access the query embedding object if available.""" return self._embedding def delete(self, ids: Optional[List[Any]] = None, **kwargs: Any) -> Optional[bool]: """Delete documents from the index. Only support direct-access index. Args: ids: List of ids of documents to delete. Returns: True if successful. """ self._op_require_direct_access_index("delete") if ids is None: raise ValueError("ids must be provided.") self.index.delete(ids) return True def similarity_search( self, query: str, k: int = 4, filter: Optional[Dict[str, Any]] = None, *, query_type: Optional[str] = None, **kwargs: Any, ) -> List[Document]: """Return docs most similar to query. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. filter: Filters to apply to the query. Defaults to None. query_type: The type of this query. Supported values are "ANN" and "HYBRID". Returns: List of Documents most similar to the embedding. """ docs_with_score = self.similarity_search_with_score( query=query, k=k, filter=filter, query_type=query_type, **kwargs, ) return [doc for doc, _ in docs_with_score] def similarity_search_with_score( self, query: str, k: int = 4, filter: Optional[Dict[str, Any]] = None, *, query_type: Optional[str] = None, **kwargs: Any, ) -> List[Tuple[Document, float]]: """Return docs most similar to query, along with scores. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. filter: Filters to apply to the query. Defaults to None. query_type: The type of this query. Supported values are "ANN" and "HYBRID". Returns: List of Documents most similar to the embedding and score for each. """ if self._is_databricks_managed_embeddings(): query_text = query query_vector = None else: assert self.embeddings is not None, "embedding model is required." # The value for `query_text` needs to be specified only for hybrid search. 
if query_type is not None and query_type.upper() == "HYBRID": query_text = query else: query_text = None query_vector = self.embeddings.embed_query(query) search_resp = self.index.similarity_search( columns=self.columns, query_text=query_text, query_vector=query_vector, filters=filter or _alias_filters(kwargs), num_results=k, query_type=query_type, ) return self._parse_search_response(search_resp) @staticmethod def _identity_fn(score: float) -> float: return score def _select_relevance_score_fn(self) -> Callable[[float], float]: """ Databricks Vector search uses a normalized score 1/(1+d) where d is the L2 distance. Hence, we simply return the identity function. """ return self._identity_fn def max_marginal_relevance_search( self, query: str, k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, filter: Optional[Dict[str, Any]] = None, *, query_type: Optional[str] = None, **kwargs: Any, ) -> List[Document]: """Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. fetch_k: Number of Documents to fetch to pass to MMR algorithm. lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. filter: Filters to apply to the query. Defaults to None. query_type: The type of this query. Supported values are "ANN" and "HYBRID". Returns: List of Documents selected by maximal marginal relevance. """ if not self._is_databricks_managed_embeddings(): assert self.embeddings is not None, "embedding model is required." query_vector = self.embeddings.embed_query(query) else: raise ValueError( "`max_marginal_relevance_search` is not supported for index with " "Databricks-managed embeddings." ) docs = self.max_marginal_relevance_search_by_vector( query_vector, k, fetch_k, lambda_mult=lambda_mult, filter=filter or _alias_filters(kwargs), query_type=query_type, ) return docs def max_marginal_relevance_search_by_vector( self, embedding: List[float], k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, filter: Optional[Any] = None, *, query_type: Optional[str] = None, **kwargs: Any, ) -> List[Document]: """Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. fetch_k: Number of Documents to fetch to pass to MMR algorithm. lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. filter: Filters to apply to the query. Defaults to None. query_type: The type of this query. Supported values are "ANN" and "HYBRID". Returns: List of Documents selected by maximal marginal relevance. """ if not self._is_databricks_managed_embeddings(): embedding_column = self._embedding_vector_column_name() else: raise ValueError( "`max_marginal_relevance_search` is not supported for index with " "Databricks-managed embeddings." 
) search_resp = self.index.similarity_search( columns=list(set(self.columns + [embedding_column])), query_text=None, query_vector=embedding, filters=filter or _alias_filters(kwargs), num_results=fetch_k, query_type=query_type, ) embeddings_result_index = ( search_resp.get("manifest").get("columns").index({"name": embedding_column}) ) embeddings = [ doc[embeddings_result_index] for doc in search_resp.get("result").get("data_array") ] mmr_selected = maximal_marginal_relevance( np.array(embedding, dtype=np.float32), embeddings, k=k, lambda_mult=lambda_mult, ) ignore_cols: List = ( [embedding_column] if embedding_column not in self.columns else [] ) candidates = self._parse_search_response(search_resp, ignore_cols=ignore_cols) selected_results = [r[0] for i, r in enumerate(candidates) if i in mmr_selected] return selected_results def similarity_search_by_vector( self, embedding: List[float], k: int = 4, filter: Optional[Any] = None, *, query_type: Optional[str] = None, query: Optional[str] = None, **kwargs: Any, ) -> List[Document]: """Return docs most similar to embedding vector. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. filter: Filters to apply to the query. Defaults to None. query_type: The type of this query. Supported values are "ANN" and "HYBRID". Returns: List of Documents most similar to the embedding. """ docs_with_score = self.similarity_search_by_vector_with_score( embedding=embedding, k=k, filter=filter, query_type=query_type, query=query, **kwargs, ) return [doc for doc, _ in docs_with_score] def similarity_search_by_vector_with_score( self, embedding: List[float], k: int = 4, filter: Optional[Any] = None, *, query_type: Optional[str] = None, query: Optional[str] = None, **kwargs: Any, ) -> List[Tuple[Document, float]]: """Return docs most similar to embedding vector, along with scores. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. filter: Filters to apply to the query. Defaults to None. query_type: The type of this query. Supported values are "ANN" and "HYBRID". Returns: List of Documents most similar to the embedding and score for each. """ if self._is_databricks_managed_embeddings(): raise ValueError( "`similarity_search_by_vector` is not supported for index with " "Databricks-managed embeddings." ) if query_type is not None and query_type.upper() == "HYBRID": if query is None: raise ValueError( "A value for `query` must be specified for hybrid search." 
) query_text = query else: if query is not None: raise ValueError( ( "Cannot specify both `embedding` and " '`query` unless `query_type="HYBRID"' ) ) query_text = None search_resp = self.index.similarity_search( columns=self.columns, query_vector=embedding, query_text=query_text, filters=filter or _alias_filters(kwargs), num_results=k, query_type=query_type, ) return self._parse_search_response(search_resp) def _parse_search_response( self, search_resp: Dict, ignore_cols: Optional[List[str]] = None ) -> List[Tuple[Document, float]]: """Parse the search response into a list of Documents with score.""" if ignore_cols is None: ignore_cols = [] columns = [ col["name"] for col in search_resp.get("manifest", dict()).get("columns", []) ] docs_with_score = [] for result in search_resp.get("result", dict()).get("data_array", []): doc_id = result[columns.index(self.primary_key)] text_content = result[columns.index(self.text_column)] metadata = { col: value for col, value in zip(columns[:-1], result[:-1]) if col not in ([self.primary_key, self.text_column] + ignore_cols) } metadata[self.primary_key] = doc_id score = result[-1] doc = Document(page_content=text_content, metadata=metadata) docs_with_score.append((doc, score)) return docs_with_score def _index_schema(self) -> Optional[Dict]: """Return the index schema as a dictionary. Return None if no schema found. """ if self._is_direct_access_index(): schema_json = self._direct_access_index_spec.get("schema_json") if schema_json is not None: return json.loads(schema_json) return None def _embedding_vector_column_name(self) -> Optional[str]: """Return the name of the embedding vector column. None if the index is not a self-managed embedding index. """ return self._embedding_vector_column().get("name") def _embedding_vector_column_dimension(self) -> Optional[int]: """Return the dimension of the embedding vector column. None if the index is not a self-managed embedding index. """ return self._embedding_vector_column().get("embedding_dimension") def _embedding_vector_column(self) -> Dict: """Return the embedding vector column configs as a dictionary. Empty if the index is not a self-managed embedding index. """ index_spec = ( self._delta_sync_index_spec if self._is_delta_sync_index() else self._direct_access_index_spec ) return next(iter(index_spec.get("embedding_vector_columns") or list()), dict()) def _embedding_source_column_name(self) -> Optional[str]: """Return the name of the embedding source column. None if the index is not a Databricks-managed embedding index. """ return self._embedding_source_column().get("name") def _embedding_source_column(self) -> Dict: """Return the embedding source column configs as a dictionary. Empty if the index is not a Databricks-managed embedding index. 
""" index_spec = self._delta_sync_index_spec return next(iter(index_spec.get("embedding_source_columns") or list()), dict()) def _is_delta_sync_index(self) -> bool: """Return True if the index is a delta-sync index.""" return self.index_type == "DELTA_SYNC" def _is_direct_access_index(self) -> bool: """Return True if the index is a direct-access index.""" return self.index_type == "DIRECT_ACCESS" def _is_databricks_managed_embeddings(self) -> bool: """Return True if the embeddings are managed by Databricks Vector Search.""" return ( self._is_delta_sync_index() and self._embedding_source_column_name() is not None ) def _infer_embedding_dimension(self) -> int: """Infer the embedding dimension from the embedding function.""" assert self.embeddings is not None, "embedding model is required." return len(self.embeddings.embed_query("test")) def _op_require_direct_access_index(self, op_name: str) -> None: """ Raise ValueError if the operation is not supported for direct-access index.""" if not self._is_direct_access_index(): raise ValueError(f"`{op_name}` is only supported for direct-access index.") @staticmethod def _require_arg(arg: Any, arg_name: str) -> None: """Raise ValueError if the required arg with name `arg_name` is None.""" if not arg: raise ValueError(f"`{arg_name}` is required for this index.") def _alias_filters(kwargs: Dict[str, Any]) -> Optional[Dict[str, Any]]: """ The `filters` argument was used in the previous versions. It is now replaced with `filter` for consistency with other vector stores, but we still support `filters` for backward compatibility. """ if "filters" in kwargs: warn_deprecated( since="0.2.11", removal="1.0", message="DatabricksVectorSearch received a key `filters` in search_kwargs. " "`filters` was deprecated since langchain-community 0.2.11 and will " "be removed in 0.3. Please use `filter` instead.", ) return kwargs.pop("filters", None)
0
lc_public_repos/langchain/libs/community/langchain_community
lc_public_repos/langchain/libs/community/langchain_community/vectorstores/tencentvectordb.py
"""Wrapper around the Tencent vector database.""" from __future__ import annotations import json import logging import time from enum import Enum from typing import ( Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union, cast, ) import numpy as np from langchain_core.documents import Document from langchain_core.embeddings import Embeddings from langchain_core.utils import guard_import from langchain_core.vectorstores import VectorStore from pydantic import BaseModel from langchain_community.vectorstores.utils import maximal_marginal_relevance logger = logging.getLogger(__name__) META_FIELD_TYPE_UINT64 = "uint64" META_FIELD_TYPE_STRING = "string" META_FIELD_TYPE_ARRAY = "array" META_FIELD_TYPE_VECTOR = "vector" META_FIELD_TYPES = [ META_FIELD_TYPE_UINT64, META_FIELD_TYPE_STRING, META_FIELD_TYPE_ARRAY, META_FIELD_TYPE_VECTOR, ] class ConnectionParams: """Tencent vector DB Connection params. See the following documentation for details: https://cloud.tencent.com/document/product/1709/95820 Attribute: url (str) : The access address of the vector database server that the client needs to connect to. key (str): API key for client to access the vector database server, which is used for authentication. username (str) : Account for client to access the vector database server. timeout (int) : Request Timeout. """ def __init__(self, url: str, key: str, username: str = "root", timeout: int = 10): self.url = url self.key = key self.username = username self.timeout = timeout class IndexParams: """Tencent vector DB Index params. See the following documentation for details: https://cloud.tencent.com/document/product/1709/95826 """ def __init__( self, dimension: int, shard: int = 1, replicas: int = 2, index_type: str = "HNSW", metric_type: str = "L2", params: Optional[Dict] = None, ): self.dimension = dimension self.shard = shard self.replicas = replicas self.index_type = index_type self.metric_type = metric_type self.params = params class MetaField(BaseModel): """MetaData Field for Tencent vector DB.""" name: str description: Optional[str] data_type: Union[str, Enum] index: bool = False def __init__(self, **data: Any) -> None: super().__init__(**data) enum = guard_import("tcvectordb.model.enum") if isinstance(self.data_type, str): if self.data_type not in META_FIELD_TYPES: raise ValueError(f"unsupported data_type {self.data_type}") target = [ fe for fe in enum.FieldType if fe.value.lower() == self.data_type.lower() ] if target: self.data_type = target[0] else: raise ValueError(f"unsupported data_type {self.data_type}") else: if self.data_type not in enum.FieldType: raise ValueError(f"unsupported data_type {self.data_type}") def translate_filter( lc_filter: str, allowed_fields: Optional[Sequence[str]] = None ) -> str: """Translate LangChain filter to Tencent VectorDB filter. Args: lc_filter (str): LangChain filter. allowed_fields (Optional[Sequence[str]]): Allowed fields for filter. Returns: str: Translated filter. 
""" from langchain.chains.query_constructor.base import fix_filter_directive from langchain.chains.query_constructor.parser import get_parser from langchain.retrievers.self_query.tencentvectordb import ( TencentVectorDBTranslator, ) from langchain_core.structured_query import FilterDirective tvdb_visitor = TencentVectorDBTranslator(allowed_fields) flt = cast( Optional[FilterDirective], get_parser( allowed_comparators=tvdb_visitor.allowed_comparators, allowed_operators=tvdb_visitor.allowed_operators, allowed_attributes=allowed_fields, ).parse(lc_filter), ) flt = fix_filter_directive(flt) return flt.accept(tvdb_visitor) if flt else "" class TencentVectorDB(VectorStore): """Tencent VectorDB as a vector store. In order to use this you need to have a database instance. See the following documentation for details: https://cloud.tencent.com/document/product/1709/104489 """ field_id: str = "id" field_vector: str = "vector" field_text: str = "text" field_metadata: str = "metadata" def __init__( self, embedding: Embeddings, connection_params: ConnectionParams, index_params: IndexParams = IndexParams(768), database_name: str = "LangChainDatabase", collection_name: str = "LangChainCollection", drop_old: Optional[bool] = False, collection_description: Optional[str] = "Collection for LangChain", meta_fields: Optional[List[MetaField]] = None, t_vdb_embedding: Optional[str] = "bge-base-zh", ): self.document = guard_import("tcvectordb.model.document") tcvectordb = guard_import("tcvectordb") tcollection = guard_import("tcvectordb.model.collection") enum = guard_import("tcvectordb.model.enum") self.embedding_model = None if embedding is None and t_vdb_embedding: embedding_model = [ model for model in enum.EmbeddingModel if t_vdb_embedding == model.model_name ] if not any(embedding_model): raise ValueError( f"embedding model `{t_vdb_embedding}` is invalid. 
" f"choices: {[member.model_name for member in enum.EmbeddingModel]}" ) self.embedding_model = tcollection.Embedding( vector_field="vector", field="text", model=embedding_model[0] ) self.embedding_func = embedding self.index_params = index_params self.collection_description = collection_description self.vdb_client = tcvectordb.VectorDBClient( url=connection_params.url, username=connection_params.username, key=connection_params.key, timeout=connection_params.timeout, ) self.meta_fields = meta_fields db_list = self.vdb_client.list_databases() db_exist: bool = False for db in db_list: if database_name == db.database_name: db_exist = True break if db_exist: self.database = self.vdb_client.database(database_name) else: self.database = self.vdb_client.create_database(database_name) try: self.collection = self.database.describe_collection(collection_name) if drop_old: self.database.drop_collection(collection_name) self._create_collection(collection_name) except tcvectordb.exceptions.VectorDBException: self._create_collection(collection_name) def _create_collection(self, collection_name: str) -> None: enum = guard_import("tcvectordb.model.enum") vdb_index = guard_import("tcvectordb.model.index") index_type = enum.IndexType.__members__.get(self.index_params.index_type) if index_type is None: raise ValueError("unsupported index_type") metric_type = enum.MetricType.__members__.get(self.index_params.metric_type) if metric_type is None: raise ValueError("unsupported metric_type") params = vdb_index.HNSWParams( m=(self.index_params.params or {}).get("M", 16), efconstruction=(self.index_params.params or {}).get("efConstruction", 200), ) index = vdb_index.Index( vdb_index.FilterIndex( self.field_id, enum.FieldType.String, enum.IndexType.PRIMARY_KEY ), vdb_index.VectorIndex( self.field_vector, self.index_params.dimension, index_type, metric_type, params, ), vdb_index.FilterIndex( self.field_text, enum.FieldType.String, enum.IndexType.FILTER ), ) # Add metadata indexes if self.meta_fields is not None: index_meta_fields = [field for field in self.meta_fields if field.index] for field in index_meta_fields: ft_index = vdb_index.FilterIndex( field.name, field.data_type, enum.IndexType.FILTER ) index.add(ft_index) else: index.add( vdb_index.FilterIndex( self.field_metadata, enum.FieldType.String, enum.IndexType.FILTER ) ) self.collection = self.database.create_collection( name=collection_name, shard=self.index_params.shard, replicas=self.index_params.replicas, description=self.collection_description, index=index, embedding=self.embedding_model, ) @property def embeddings(self) -> Embeddings: return self.embedding_func def delete( self, ids: Optional[List[str]] = None, filter_expr: Optional[str] = None, **kwargs: Any, ) -> Optional[bool]: """Delete documents from the collection.""" delete_attrs = {} if ids: delete_attrs["ids"] = ids if filter_expr: delete_attrs["filter"] = self.document.Filter(filter_expr) self.collection.delete(**delete_attrs) return True @classmethod def from_texts( cls, texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, connection_params: Optional[ConnectionParams] = None, index_params: Optional[IndexParams] = None, database_name: str = "LangChainDatabase", collection_name: str = "LangChainCollection", drop_old: Optional[bool] = False, collection_description: Optional[str] = "Collection for LangChain", meta_fields: Optional[List[MetaField]] = None, t_vdb_embedding: Optional[str] = "bge-base-zh", **kwargs: Any, ) -> TencentVectorDB: """Create a collection, indexes 
it with HNSW, and insert data.""" if len(texts) == 0: raise ValueError("texts is empty") if connection_params is None: raise ValueError("connection_params is empty") enum = guard_import("tcvectordb.model.enum") if embedding is None and t_vdb_embedding is None: raise ValueError("embedding and t_vdb_embedding cannot be both None") if embedding: embeddings = embedding.embed_documents(texts[0:1]) dimension = len(embeddings[0]) else: embedding_model = [ model for model in enum.EmbeddingModel if t_vdb_embedding == model.model_name ] if not any(embedding_model): raise ValueError( f"embedding model `{t_vdb_embedding}` is invalid. " f"choices: {[member.model_name for member in enum.EmbeddingModel]}" ) dimension = embedding_model[0]._EmbeddingModel__dimensions if index_params is None: index_params = IndexParams(dimension=dimension) else: index_params.dimension = dimension vector_db = cls( embedding=embedding, connection_params=connection_params, index_params=index_params, database_name=database_name, collection_name=collection_name, drop_old=drop_old, collection_description=collection_description, meta_fields=meta_fields, t_vdb_embedding=t_vdb_embedding, ) vector_db.add_texts(texts=texts, metadatas=metadatas) return vector_db def add_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, timeout: Optional[int] = None, batch_size: int = 1000, ids: Optional[List[str]] = None, **kwargs: Any, ) -> List[str]: """Insert text data into TencentVectorDB.""" texts = list(texts) if len(texts) == 0: logger.debug("Nothing to insert, skipping.") return [] if self.embedding_func: embeddings = self.embedding_func.embed_documents(texts) else: embeddings = [] pks: list[str] = [] total_count = len(texts) for start in range(0, total_count, batch_size): # Grab end index docs = [] end = min(start + batch_size, total_count) for id in range(start, end, 1): metadata = ( self._get_meta(metadatas[id]) if metadatas and metadatas[id] else {} ) doc_id = ids[id] if ids else None doc_attrs: Dict[str, Any] = { "id": doc_id or "{}-{}-{}".format(time.time_ns(), hash(texts[id]), id) } if embeddings: doc_attrs["vector"] = embeddings[id] doc_attrs["text"] = texts[id] doc_attrs.update(metadata) doc = self.document.Document(**doc_attrs) docs.append(doc) pks.append(doc_attrs["id"]) self.collection.upsert(docs, timeout) return pks def similarity_search( self, query: str, k: int = 4, param: Optional[dict] = None, expr: Optional[str] = None, timeout: Optional[int] = None, **kwargs: Any, ) -> List[Document]: """Perform a similarity search against the query string.""" res = self.similarity_search_with_score( query=query, k=k, param=param, expr=expr, timeout=timeout, **kwargs ) return [doc for doc, _ in res] def similarity_search_with_score( self, query: str, k: int = 4, param: Optional[dict] = None, expr: Optional[str] = None, timeout: Optional[int] = None, **kwargs: Any, ) -> List[Tuple[Document, float]]: """Perform a search on a query string and return results with score.""" # Embed the query text. 
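        # Two paths: with a client-side embedding function, embed locally and
        # search by vector; otherwise pass the raw query text through so the
        # server-side embedding model configured on the collection is used.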
if self.embedding_func: embedding = self.embedding_func.embed_query(query) return self.similarity_search_with_score_by_vector( embedding=embedding, k=k, param=param, expr=expr, timeout=timeout, **kwargs, ) return self.similarity_search_with_score_by_vector( embedding=[], k=k, param=param, expr=expr, timeout=timeout, query=query, **kwargs, ) def similarity_search_by_vector( self, embedding: List[float], k: int = 4, param: Optional[dict] = None, expr: Optional[str] = None, timeout: Optional[int] = None, **kwargs: Any, ) -> List[Document]: """Perform a similarity search against the query string.""" docs = self.similarity_search_with_score_by_vector( embedding=embedding, k=k, param=param, expr=expr, timeout=timeout, **kwargs ) return [doc for doc, _ in docs] def similarity_search_with_score_by_vector( self, embedding: List[float], k: int = 4, param: Optional[dict] = None, expr: Optional[str] = None, filter: Optional[str] = None, timeout: Optional[int] = None, query: Optional[str] = None, **kwargs: Any, ) -> List[Tuple[Document, float]]: """Perform a search on a query string and return results with score.""" if filter and not expr: expr = translate_filter( filter, [f.name for f in (self.meta_fields or []) if f.index] ) search_args = { "filter": self.document.Filter(expr) if expr else None, "params": self.document.HNSWSearchParams(ef=(param or {}).get("ef", 10)), "retrieve_vector": False, "limit": k, "timeout": timeout, } if query: search_args["embeddingItems"] = [query] res: List[List[Dict]] = self.collection.searchByText(**search_args).get( "documents" ) else: search_args["vectors"] = [embedding] res = self.collection.search(**search_args) ret: List[Tuple[Document, float]] = [] if res is None or len(res) == 0: return ret for result in res[0]: meta = self._get_meta(result) doc = Document(page_content=result.get(self.field_text), metadata=meta) # type: ignore[arg-type] pair = (doc, result.get("score", 0.0)) ret.append(pair) return ret def max_marginal_relevance_search( self, query: str, k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, param: Optional[dict] = None, expr: Optional[str] = None, timeout: Optional[int] = None, **kwargs: Any, ) -> List[Document]: """Perform a search and return results that are reordered by MMR.""" if self.embedding_func: embedding = self.embedding_func.embed_query(query) return self.max_marginal_relevance_search_by_vector( embedding=embedding, k=k, fetch_k=fetch_k, lambda_mult=lambda_mult, param=param, expr=expr, timeout=timeout, **kwargs, ) # tvdb will do the query embedding docs = self.similarity_search_with_score( query=query, k=fetch_k, param=param, expr=expr, timeout=timeout, **kwargs ) return [doc for doc, _ in docs] def _get_meta(self, result: Dict) -> Dict: """Get metadata from the result.""" if self.meta_fields: return {field.name: result.get(field.name) for field in self.meta_fields} elif result.get(self.field_metadata): raw_meta = result.get(self.field_metadata) if raw_meta and isinstance(raw_meta, str): return json.loads(raw_meta) return {} def max_marginal_relevance_search_by_vector( self, embedding: list[float], k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, param: Optional[dict] = None, expr: Optional[str] = None, filter: Optional[str] = None, timeout: Optional[int] = None, **kwargs: Any, ) -> List[Document]: """Perform a search and return results that are reordered by MMR.""" if filter and not expr: expr = translate_filter( filter, [f.name for f in (self.meta_fields or []) if f.index] ) res: List[List[Dict]] = self.collection.search( 
vectors=[embedding], filter=self.document.Filter(expr) if expr else None, params=self.document.HNSWSearchParams(ef=(param or {}).get("ef", 10)), retrieve_vector=True, limit=fetch_k, timeout=timeout, ) # Organize results. documents = [] ordered_result_embeddings = [] for result in res[0]: meta = self._get_meta(result) doc = Document(page_content=result.get(self.field_text), metadata=meta) # type: ignore[arg-type] documents.append(doc) ordered_result_embeddings.append(result.get(self.field_vector)) # Get the new order of results. new_ordering = maximal_marginal_relevance( np.array(embedding), ordered_result_embeddings, k=k, lambda_mult=lambda_mult ) # Reorder the values and return. return [documents[x] for x in new_ordering if x != -1] def _select_relevance_score_fn(self) -> Callable[[float], float]: metric_type = self.index_params.metric_type if metric_type == "COSINE": return self._cosine_relevance_score_fn elif metric_type == "L2": return self._euclidean_relevance_score_fn elif metric_type == "IP": return self._max_inner_product_relevance_score_fn else: raise ValueError( "No supported normalization function" f" for distance metric of type: {metric_type}." )
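

# Usage sketch (all values below are placeholders):
#
#     params = ConnectionParams(url="http://<host>", key="<api-key>")
#     store = TencentVectorDB.from_texts(
#         ["doc one", "doc two"],
#         embedding=None,  # use the server-side model named by t_vdb_embedding
#         connection_params=params,
#         t_vdb_embedding="bge-base-zh",
#     )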
0
lc_public_repos/langchain/libs/community/langchain_community
lc_public_repos/langchain/libs/community/langchain_community/vectorstores/oraclevs.py
from __future__ import annotations import array import functools import hashlib import json import logging import os import uuid from typing import ( TYPE_CHECKING, Any, Callable, Dict, Iterable, List, Optional, Tuple, Type, TypeVar, Union, cast, ) if TYPE_CHECKING: from oracledb import Connection import numpy as np from langchain_core.documents import Document from langchain_core.embeddings import Embeddings from langchain_core.vectorstores import VectorStore from langchain_community.vectorstores.utils import ( DistanceStrategy, maximal_marginal_relevance, ) logger = logging.getLogger(__name__) log_level = os.getenv("LOG_LEVEL", "ERROR").upper() logging.basicConfig( level=getattr(logging, log_level), format="%(asctime)s - %(levelname)s - %(message)s", ) # Define a type variable that can be any kind of function T = TypeVar("T", bound=Callable[..., Any]) def _handle_exceptions(func: T) -> T: @functools.wraps(func) def wrapper(*args: Any, **kwargs: Any) -> Any: try: return func(*args, **kwargs) except RuntimeError as db_err: # Handle a known type of error (e.g., DB-related) specifically logger.exception("DB-related error occurred.") raise RuntimeError( "Failed due to a DB issue: {}".format(db_err) ) from db_err except ValueError as val_err: # Handle another known type of error specifically logger.exception("Validation error.") raise ValueError("Validation failed: {}".format(val_err)) from val_err except Exception as e: # Generic handler for all other exceptions logger.exception("An unexpected error occurred: {}".format(e)) raise RuntimeError("Unexpected error: {}".format(e)) from e return cast(T, wrapper) def _table_exists(client: Connection, table_name: str) -> bool: try: import oracledb except ImportError as e: raise ImportError( "Unable to import oracledb, please install with " "`pip install -U oracledb`." 
) from e try: with client.cursor() as cursor: cursor.execute(f"SELECT COUNT(*) FROM {table_name}") return True except oracledb.DatabaseError as ex: err_obj = ex.args if err_obj[0].code == 942: return False raise def _compare_version(version: str, target_version: str) -> bool: # Split both version strings into parts version_parts = [int(part) for part in version.split(".")] target_parts = [int(part) for part in target_version.split(".")] # Compare each part for v, t in zip(version_parts, target_parts): if v < t: return True # Current version is less elif v > t: return False # Current version is greater # If all parts equal so far, check if version has fewer parts than target_version return len(version_parts) < len(target_parts) @_handle_exceptions def _index_exists(client: Connection, index_name: str) -> bool: # Check if the index exists query = """ SELECT index_name FROM all_indexes WHERE upper(index_name) = upper(:idx_name) """ with client.cursor() as cursor: # Execute the query cursor.execute(query, idx_name=index_name.upper()) result = cursor.fetchone() # Check if the index exists return result is not None def _get_distance_function(distance_strategy: DistanceStrategy) -> str: # Dictionary to map distance strategies to their corresponding function # names distance_strategy2function = { DistanceStrategy.EUCLIDEAN_DISTANCE: "EUCLIDEAN", DistanceStrategy.DOT_PRODUCT: "DOT", DistanceStrategy.COSINE: "COSINE", } # Attempt to return the corresponding distance function if distance_strategy in distance_strategy2function: return distance_strategy2function[distance_strategy] # If it's an unsupported distance strategy, raise an error raise ValueError(f"Unsupported distance strategy: {distance_strategy}") def _get_index_name(base_name: str) -> str: unique_id = str(uuid.uuid4()).replace("-", "") return f"{base_name}_{unique_id}" @_handle_exceptions def _create_table(client: Connection, table_name: str, embedding_dim: int) -> None: cols_dict = { "id": "RAW(16) DEFAULT SYS_GUID() PRIMARY KEY", "text": "CLOB", "metadata": "CLOB", "embedding": f"vector({embedding_dim}, FLOAT32)", } if not _table_exists(client, table_name): with client.cursor() as cursor: ddl_body = ", ".join( f"{col_name} {col_type}" for col_name, col_type in cols_dict.items() ) ddl = f"CREATE TABLE {table_name} ({ddl_body})" cursor.execute(ddl) logger.info("Table created successfully...") else: logger.info("Table already exists...") @_handle_exceptions def create_index( client: Connection, vector_store: OracleVS, params: Optional[dict[str, Any]] = None, ) -> None: """Create an index on the vector store. Args: client: The OracleDB connection object. vector_store: The vector store object. params: Optional parameters for the index creation. Raises: ValueError: If an invalid parameter is provided. 
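
    Example:
        .. code-block:: python

            # Parameter values below are illustrative; unspecified HNSW
            # options fall back to the defaults in ``_create_hnsw_index``.
            create_index(
                connection,
                vector_store,
                params={"idx_name": "hnsw_idx1", "idx_type": "HNSW", "parallel": 8},
            )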
""" if params: if params["idx_type"] == "HNSW": _create_hnsw_index( client, vector_store.table_name, vector_store.distance_strategy, params ) elif params["idx_type"] == "IVF": _create_ivf_index( client, vector_store.table_name, vector_store.distance_strategy, params ) else: _create_hnsw_index( client, vector_store.table_name, vector_store.distance_strategy, params ) else: _create_hnsw_index( client, vector_store.table_name, vector_store.distance_strategy, params ) return @_handle_exceptions def _create_hnsw_index( client: Connection, table_name: str, distance_strategy: DistanceStrategy, params: Optional[dict[str, Any]] = None, ) -> None: defaults = { "idx_name": "HNSW", "idx_type": "HNSW", "neighbors": 32, "efConstruction": 200, "accuracy": 90, "parallel": 8, } if params: config = params.copy() # Ensure compulsory parts are included for compulsory_key in ["idx_name", "parallel"]: if compulsory_key not in config: if compulsory_key == "idx_name": config[compulsory_key] = _get_index_name( str(defaults[compulsory_key]) ) else: config[compulsory_key] = defaults[compulsory_key] # Validate keys in config against defaults for key in config: if key not in defaults: raise ValueError(f"Invalid parameter: {key}") else: config = defaults # Base SQL statement idx_name = config["idx_name"] base_sql = ( f"create vector index {idx_name} on {table_name}(embedding) " f"ORGANIZATION INMEMORY NEIGHBOR GRAPH" ) # Optional parts depending on parameters accuracy_part = " WITH TARGET ACCURACY {accuracy}" if ("accuracy" in config) else "" distance_part = f" DISTANCE {_get_distance_function(distance_strategy)}" parameters_part = "" if "neighbors" in config and "efConstruction" in config: parameters_part = ( " parameters (type {idx_type}, neighbors {" "neighbors}, efConstruction {efConstruction})" ) elif "neighbors" in config and "efConstruction" not in config: config["efConstruction"] = defaults["efConstruction"] parameters_part = ( " parameters (type {idx_type}, neighbors {" "neighbors}, efConstruction {efConstruction})" ) elif "neighbors" not in config and "efConstruction" in config: config["neighbors"] = defaults["neighbors"] parameters_part = ( " parameters (type {idx_type}, neighbors {" "neighbors}, efConstruction {efConstruction})" ) # Always included part for parallel parallel_part = " parallel {parallel}" # Combine all parts ddl_assembly = ( base_sql + accuracy_part + distance_part + parameters_part + parallel_part ) # Format the SQL with values from the params dictionary ddl = ddl_assembly.format(**config) # Check if the index exists if not _index_exists(client, config["idx_name"]): with client.cursor() as cursor: cursor.execute(ddl) logger.info("Index created successfully...") else: logger.info("Index already exists...") @_handle_exceptions def _create_ivf_index( client: Connection, table_name: str, distance_strategy: DistanceStrategy, params: Optional[dict[str, Any]] = None, ) -> None: # Default configuration defaults = { "idx_name": "IVF", "idx_type": "IVF", "neighbor_part": 32, "accuracy": 90, "parallel": 8, } if params: config = params.copy() # Ensure compulsory parts are included for compulsory_key in ["idx_name", "parallel"]: if compulsory_key not in config: if compulsory_key == "idx_name": config[compulsory_key] = _get_index_name( str(defaults[compulsory_key]) ) else: config[compulsory_key] = defaults[compulsory_key] # Validate keys in config against defaults for key in config: if key not in defaults: raise ValueError(f"Invalid parameter: {key}") else: config = defaults # Base SQL statement idx_name 
= config["idx_name"] base_sql = ( f"CREATE VECTOR INDEX {idx_name} ON {table_name}(embedding) " f"ORGANIZATION NEIGHBOR PARTITIONS" ) # Optional parts depending on parameters accuracy_part = " WITH TARGET ACCURACY {accuracy}" if ("accuracy" in config) else "" distance_part = f" DISTANCE {_get_distance_function(distance_strategy)}" parameters_part = "" if "idx_type" in config and "neighbor_part" in config: parameters_part = ( f" PARAMETERS (type {config['idx_type']}, neighbor" f" partitions {config['neighbor_part']})" ) # Always included part for parallel parallel_part = f" PARALLEL {config['parallel']}" # Combine all parts ddl_assembly = ( base_sql + accuracy_part + distance_part + parameters_part + parallel_part ) # Format the SQL with values from the params dictionary ddl = ddl_assembly.format(**config) # Check if the index exists if not _index_exists(client, config["idx_name"]): with client.cursor() as cursor: cursor.execute(ddl) logger.info("Index created successfully...") else: logger.info("Index already exists...") @_handle_exceptions def drop_table_purge(client: Connection, table_name: str) -> None: """Drop a table and purge it from the database. Args: client: The OracleDB connection object. table_name: The name of the table to drop. Raises: RuntimeError: If an error occurs while dropping the table. """ if _table_exists(client, table_name): cursor = client.cursor() with cursor: ddl = f"DROP TABLE {table_name} PURGE" cursor.execute(ddl) logger.info("Table dropped successfully...") else: logger.info("Table not found...") return @_handle_exceptions def drop_index_if_exists(client: Connection, index_name: str) -> None: """Drop an index if it exists. Args: client: The OracleDB connection object. index_name: The name of the index to drop. Raises: RuntimeError: If an error occurs while dropping the index. """ if _index_exists(client, index_name): drop_query = f"DROP INDEX {index_name}" with client.cursor() as cursor: cursor.execute(drop_query) logger.info(f"Index {index_name} has been dropped.") else: logger.exception(f"Index {index_name} does not exist.") return class OracleVS(VectorStore): """`OracleVS` vector store. To use, you should have both: - the ``oracledb`` python package installed - a connection string associated with a OracleDBCluster having deployed an Search index Example: .. code-block:: python from langchain.vectorstores import OracleVS from langchain.embeddings.openai import OpenAIEmbeddings import oracledb with oracledb.connect(user = user, passwd = pwd, dsn = dsn) as connection: print ("Database version:", connection.version) embeddings = OpenAIEmbeddings() query = "" vectors = OracleVS(connection, table_name, embeddings, query) """ def __init__( self, client: Connection, embedding_function: Union[ Callable[[str], List[float]], Embeddings, ], table_name: str, distance_strategy: DistanceStrategy = DistanceStrategy.EUCLIDEAN_DISTANCE, query: Optional[str] = "What is a Oracle database", params: Optional[Dict[str, Any]] = None, ): try: import oracledb except ImportError as e: raise ImportError( "Unable to import oracledb, please install with " "`pip install -U oracledb`." 
) from e self.insert_mode = "array" if client.thin is True: if oracledb.__version__ == "2.1.0": raise Exception( "Oracle DB python thin client driver version 2.1.0 not supported" ) elif _compare_version(oracledb.__version__, "2.2.0"): self.insert_mode = "clob" else: self.insert_mode = "array" else: if (_compare_version(oracledb.__version__, "2.1.0")) and ( not ( _compare_version( ".".join(map(str, oracledb.clientversion())), "23.4" ) ) ): raise Exception( "Oracle DB python thick client driver version earlier than " "2.1.0 not supported with client libraries greater than " "equal to 23.4" ) if _compare_version(".".join(map(str, oracledb.clientversion())), "23.4"): self.insert_mode = "clob" else: self.insert_mode = "array" if _compare_version(oracledb.__version__, "2.1.0"): self.insert_mode = "clob" try: """Initialize with oracledb client.""" self.client = client """Initialize with necessary components.""" if not isinstance(embedding_function, Embeddings): logger.warning( "`embedding_function` is expected to be an Embeddings " "object, support " "for passing in a function will soon be removed." ) self.embedding_function = embedding_function self.query = query embedding_dim = self.get_embedding_dimension() self.table_name = table_name self.distance_strategy = distance_strategy self.params = params _create_table(client, table_name, embedding_dim) except oracledb.DatabaseError as db_err: logger.exception(f"Database error occurred while create table: {db_err}") raise RuntimeError( "Failed to create table due to a database error." ) from db_err except ValueError as val_err: logger.exception(f"Validation error: {val_err}") raise RuntimeError( "Failed to create table due to a validation error." ) from val_err except Exception as ex: logger.exception("An unexpected error occurred while creating the index.") raise RuntimeError( "Failed to create table due to an unexpected error." ) from ex @property def embeddings(self) -> Optional[Embeddings]: """ A property that returns an Embeddings instance embedding_function is an instance of Embeddings, otherwise returns None. Returns: Optional[Embeddings]: The embedding function if it's an instance of Embeddings, otherwise None. """ return ( self.embedding_function if isinstance(self.embedding_function, Embeddings) else None ) def get_embedding_dimension(self) -> int: # Embed the single document by wrapping it in a list embedded_document = self._embed_documents( [self.query if self.query is not None else ""] ) # Get the first (and only) embedding's dimension return len(embedded_document[0]) def _embed_documents(self, texts: List[str]) -> List[List[float]]: if isinstance(self.embedding_function, Embeddings): return self.embedding_function.embed_documents(texts) elif callable(self.embedding_function): return [self.embedding_function(text) for text in texts] else: raise TypeError( "The embedding_function is neither Embeddings nor callable." ) def _embed_query(self, text: str) -> List[float]: if isinstance(self.embedding_function, Embeddings): return self.embedding_function.embed_query(text) else: return self.embedding_function(text) @_handle_exceptions def add_texts( self, texts: Iterable[str], metadatas: Optional[List[Dict[Any, Any]]] = None, ids: Optional[List[str]] = None, **kwargs: Any, ) -> List[str]: """Add more texts to the vectorstore index. Args: texts: Iterable of strings to add to the vectorstore. metadatas: Optional list of metadatas associated with the texts. ids: Optional list of ids for the texts that are being added to the vector store. 
kwargs: vectorstore specific parameters """ texts = list(texts) if ids: # If ids are provided, hash them to maintain consistency processed_ids = [ hashlib.sha256(_id.encode()).hexdigest()[:16].upper() for _id in ids ] elif metadatas and all("id" in metadata for metadata in metadatas): # If no ids are provided but metadatas with ids are, generate # ids from metadatas processed_ids = [ hashlib.sha256(metadata["id"].encode()).hexdigest()[:16].upper() for metadata in metadatas ] else: # Generate new ids if none are provided generated_ids = [ str(uuid.uuid4()) for _ in texts ] # uuid4 is more standard for random UUIDs processed_ids = [ hashlib.sha256(_id.encode()).hexdigest()[:16].upper() for _id in generated_ids ] embeddings = self._embed_documents(texts) if not metadatas: metadatas = [{} for _ in texts] docs: List[Tuple[Any, Any, Any, Any]] if self.insert_mode == "clob": docs = [ (id_, json.dumps(embedding), json.dumps(metadata), text) for id_, embedding, metadata, text in zip( processed_ids, embeddings, metadatas, texts ) ] else: docs = [ (id_, array.array("f", embedding), json.dumps(metadata), text) for id_, embedding, metadata, text in zip( processed_ids, embeddings, metadatas, texts ) ] with self.client.cursor() as cursor: cursor.executemany( f"INSERT INTO {self.table_name} (id, embedding, metadata, " f"text) VALUES (:1, :2, :3, :4)", docs, ) self.client.commit() return processed_ids def similarity_search( self, query: str, k: int = 4, filter: Optional[Dict[str, Any]] = None, **kwargs: Any, ) -> List[Document]: """Return docs most similar to query.""" if isinstance(self.embedding_function, Embeddings): embedding = self.embedding_function.embed_query(query) documents = self.similarity_search_by_vector( embedding=embedding, k=k, filter=filter, **kwargs ) return documents def similarity_search_by_vector( self, embedding: List[float], k: int = 4, filter: Optional[dict[str, Any]] = None, **kwargs: Any, ) -> List[Document]: docs_and_scores = self.similarity_search_by_vector_with_relevance_scores( embedding=embedding, k=k, filter=filter, **kwargs ) return [doc for doc, _ in docs_and_scores] def similarity_search_with_score( self, query: str, k: int = 4, filter: Optional[dict[str, Any]] = None, **kwargs: Any, ) -> List[Tuple[Document, float]]: """Return docs most similar to query.""" if isinstance(self.embedding_function, Embeddings): embedding = self.embedding_function.embed_query(query) docs_and_scores = self.similarity_search_by_vector_with_relevance_scores( embedding=embedding, k=k, filter=filter, **kwargs ) return docs_and_scores @_handle_exceptions def _get_clob_value(self, result: Any) -> str: try: import oracledb except ImportError as e: raise ImportError( "Unable to import oracledb, please install with " "`pip install -U oracledb`." 
) from e clob_value = "" if result: if isinstance(result, oracledb.LOB): raw_data = result.read() if isinstance(raw_data, bytes): clob_value = raw_data.decode( "utf-8" ) # Specify the correct encoding else: clob_value = raw_data elif isinstance(result, str): clob_value = result else: raise Exception("Unexpected type:", type(result)) return clob_value @_handle_exceptions def similarity_search_by_vector_with_relevance_scores( self, embedding: List[float], k: int = 4, filter: Optional[dict[str, Any]] = None, **kwargs: Any, ) -> List[Tuple[Document, float]]: docs_and_scores = [] embedding_arr: Any if self.insert_mode == "clob": embedding_arr = json.dumps(embedding) else: embedding_arr = array.array("f", embedding) query = f""" SELECT id, text, metadata, vector_distance(embedding, :embedding, {_get_distance_function(self.distance_strategy)}) as distance FROM {self.table_name} ORDER BY distance FETCH APPROX FIRST {k} ROWS ONLY """ # Execute the query with self.client.cursor() as cursor: cursor.execute(query, embedding=embedding_arr) results = cursor.fetchall() # Filter results if filter is provided for result in results: metadata = json.loads( self._get_clob_value(result[2]) if result[2] is not None else "{}" ) # Apply filtering based on the 'filter' dictionary if filter: if all(metadata.get(key) in value for key, value in filter.items()): doc = Document( page_content=( self._get_clob_value(result[1]) if result[1] is not None else "" ), metadata=metadata, ) distance = result[3] docs_and_scores.append((doc, distance)) else: doc = Document( page_content=( self._get_clob_value(result[1]) if result[1] is not None else "" ), metadata=metadata, ) distance = result[3] docs_and_scores.append((doc, distance)) return docs_and_scores @_handle_exceptions def similarity_search_by_vector_returning_embeddings( self, embedding: List[float], k: int, filter: Optional[Dict[str, Any]] = None, **kwargs: Any, ) -> List[Tuple[Document, float, np.ndarray]]: embedding_arr: Any if self.insert_mode == "clob": embedding_arr = json.dumps(embedding) else: embedding_arr = array.array("f", embedding) documents = [] query = f""" SELECT id, text, metadata, vector_distance(embedding, :embedding, {_get_distance_function( self.distance_strategy)}) as distance, embedding FROM {self.table_name} ORDER BY distance FETCH APPROX FIRST {k} ROWS ONLY """ # Execute the query with self.client.cursor() as cursor: cursor.execute(query, embedding=embedding_arr) results = cursor.fetchall() for result in results: page_content_str = self._get_clob_value(result[1]) metadata_str = self._get_clob_value(result[2]) metadata = json.loads(metadata_str) # Apply filter if provided and matches; otherwise, add all # documents if not filter or all( metadata.get(key) in value for key, value in filter.items() ): document = Document( page_content=page_content_str, metadata=metadata ) distance = result[3] # Assuming result[4] is already in the correct format; # adjust if necessary current_embedding = ( np.array(result[4], dtype=np.float32) if result[4] else np.empty(0, dtype=np.float32) ) documents.append((document, distance, current_embedding)) return documents # type: ignore @_handle_exceptions def max_marginal_relevance_search_with_score_by_vector( self, embedding: List[float], *, k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, filter: Optional[Dict[str, Any]] = None, ) -> List[Tuple[Document, float]]: """Return docs and their similarity scores selected using the maximal marginal relevance. 
Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: self: An instance of the class embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. fetch_k: Number of Documents to fetch before filtering to pass to MMR algorithm. filter: (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. Returns: List of Documents and similarity scores selected by maximal marginal relevance and score for each. """ # Fetch documents and their scores docs_scores_embeddings = self.similarity_search_by_vector_returning_embeddings( embedding, fetch_k, filter=filter ) # Assuming documents_with_scores is a list of tuples (Document, score) # If you need to split documents and scores for processing (e.g., # for MMR calculation) documents, scores, embeddings = ( zip(*docs_scores_embeddings) if docs_scores_embeddings else ([], [], []) ) # Assume maximal_marginal_relevance method accepts embeddings and # scores, and returns indices of selected docs mmr_selected_indices = maximal_marginal_relevance( np.array(embedding, dtype=np.float32), list(embeddings), k=k, lambda_mult=lambda_mult, ) # Filter documents based on MMR-selected indices and map scores mmr_selected_documents_with_scores = [ (documents[i], scores[i]) for i in mmr_selected_indices ] return mmr_selected_documents_with_scores @_handle_exceptions def max_marginal_relevance_search_by_vector( self, embedding: List[float], k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, filter: Optional[Dict[str, Any]] = None, **kwargs: Any, ) -> List[Document]: """Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: self: An instance of the class embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. fetch_k: Number of Documents to fetch to pass to MMR algorithm. lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. filter: Optional[Dict[str, Any]] **kwargs: Any Returns: List of Documents selected by maximal marginal relevance. """ docs_and_scores = self.max_marginal_relevance_search_with_score_by_vector( embedding, k=k, fetch_k=fetch_k, lambda_mult=lambda_mult, filter=filter ) return [doc for doc, _ in docs_and_scores] @_handle_exceptions def max_marginal_relevance_search( self, query: str, k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, filter: Optional[Dict[str, Any]] = None, **kwargs: Any, ) -> List[Document]: """Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: self: An instance of the class query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. fetch_k: Number of Documents to fetch to pass to MMR algorithm. lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. filter: Optional[Dict[str, Any]] **kwargs Returns: List of Documents selected by maximal marginal relevance. 
        `max_marginal_relevance_search` requires that `query` returns matched
        embeddings alongside the matched documents.
        """
        embedding = self._embed_query(query)
        documents = self.max_marginal_relevance_search_by_vector(
            embedding,
            k=k,
            fetch_k=fetch_k,
            lambda_mult=lambda_mult,
            filter=filter,
            **kwargs,
        )
        return documents

    @_handle_exceptions
    def delete(self, ids: Optional[List[str]] = None, **kwargs: Any) -> None:
        """Delete by vector IDs.

        Args:
            self: An instance of the class
            ids: List of ids to delete.
            **kwargs
        """
        if ids is None:
            raise ValueError("No ids provided to delete.")

        # Compute SHA-256 hashes of the ids and truncate them
        hashed_ids = [
            hashlib.sha256(_id.encode()).hexdigest()[:16].upper() for _id in ids
        ]

        # Constructing the SQL statement with individual placeholders
        placeholders = ", ".join([":id" + str(i + 1) for i in range(len(hashed_ids))])

        ddl = f"DELETE FROM {self.table_name} WHERE id IN ({placeholders})"

        # Preparing bind variables
        bind_vars = {
            f"id{i}": hashed_id for i, hashed_id in enumerate(hashed_ids, start=1)
        }

        with self.client.cursor() as cursor:
            cursor.execute(ddl, bind_vars)
            self.client.commit()

    @classmethod
    @_handle_exceptions
    def from_texts(
        cls: Type[OracleVS],
        texts: Iterable[str],
        embedding: Embeddings,
        metadatas: Optional[List[dict]] = None,
        **kwargs: Any,
    ) -> OracleVS:
        """Return VectorStore initialized from texts and embeddings."""
        client = kwargs.get("client")
        if client is None:
            raise ValueError("client parameter is required...")
        params = kwargs.get("params", {})

        table_name = str(kwargs.get("table_name", "langchain"))

        distance_strategy = cast(
            DistanceStrategy, kwargs.get("distance_strategy", None)
        )
        if not isinstance(distance_strategy, DistanceStrategy):
            raise TypeError(
                f"Expected DistanceStrategy got {type(distance_strategy).__name__}"
            )

        query = kwargs.get("query", "What is an Oracle database")

        drop_table_purge(client, table_name)

        vss = cls(
            client=client,
            embedding_function=embedding,
            table_name=table_name,
            distance_strategy=distance_strategy,
            query=query,
            params=params,
        )
        vss.add_texts(texts=list(texts), metadatas=metadatas)
        return vss
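
# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the library API above).
# Assumptions: a reachable Oracle Database with vector search support and
# valid credentials; the user/password/dsn values and table name below are
# placeholders. FakeEmbeddings stands in for any real `Embeddings` model.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import oracledb

    from langchain_community.embeddings import FakeEmbeddings

    # Placeholder connection details -- replace with real ones.
    connection = oracledb.connect(user="ORA_USER", password="ORA_PWD", dsn="ORA_DSN")

    # `from_texts` drops and recreates the table, embeds the texts, and
    # returns a ready-to-query store. Note that `distance_strategy` must be a
    # DistanceStrategy member, otherwise `from_texts` raises a TypeError.
    store = OracleVS.from_texts(
        ["Oracle supports vector search", "LangChain wraps vector stores"],
        FakeEmbeddings(size=768),
        client=connection,
        table_name="langchain_demo",
        distance_strategy=DistanceStrategy.COSINE,
    )

    # Similarity search embeds the query and runs a VECTOR_DISTANCE query.
    for doc in store.similarity_search("vector search", k=1):
        print(doc.page_content)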
0
lc_public_repos/langchain/libs/community/langchain_community
lc_public_repos/langchain/libs/community/langchain_community/vectorstores/kdbai.py
from __future__ import annotations import logging import uuid from typing import Any, Iterable, List, Optional, Tuple from langchain_core.documents import Document from langchain_core.embeddings import Embeddings from langchain_core.vectorstores import VectorStore from langchain_community.vectorstores.utils import DistanceStrategy logger = logging.getLogger(__name__) class KDBAI(VectorStore): """`KDB.AI` vector store. See https://kdb.ai. To use, you should have the `kdbai_client` python package installed. Args: table: kdbai_client.Table object to use as storage, embedding: Any embedding function implementing `langchain.embeddings.base.Embeddings` interface, distance_strategy: One option from DistanceStrategy.EUCLIDEAN_DISTANCE, DistanceStrategy.DOT_PRODUCT or DistanceStrategy.COSINE. See the example [notebook](https://github.com/KxSystems/langchain/blob/KDB.AI/docs/docs/integrations/vectorstores/kdbai.ipynb). """ def __init__( self, table: Any, embedding: Embeddings, distance_strategy: Optional[ DistanceStrategy ] = DistanceStrategy.EUCLIDEAN_DISTANCE, ): try: import kdbai_client # noqa except ImportError: raise ImportError( "Could not import kdbai_client python package. " "Please install it with `pip install kdbai_client`." ) self._table = table self._embedding = embedding self.distance_strategy = distance_strategy @property def embeddings(self) -> Optional[Embeddings]: if isinstance(self._embedding, Embeddings): return self._embedding return None def _embed_documents(self, texts: Iterable[str]) -> List[List[float]]: if isinstance(self._embedding, Embeddings): return self._embedding.embed_documents(list(texts)) return [self._embedding(t) for t in texts] def _embed_query(self, text: str) -> List[float]: if isinstance(self._embedding, Embeddings): return self._embedding.embed_query(text) return self._embedding(text) def _insert( self, texts: List[str], ids: Optional[List[str]], metadata: Optional[Any] = None, ) -> None: try: import numpy as np except ImportError: raise ImportError( "Could not import numpy python package. " "Please install it with `pip install numpy`." ) try: import pandas as pd except ImportError: raise ImportError( "Could not import pandas python package. " "Please install it with `pip install pandas`." ) embeds = self._embedding.embed_documents(texts) df = pd.DataFrame() df["id"] = ids df["text"] = [t.encode("utf-8") for t in texts] df["embeddings"] = [np.array(e, dtype="float32") for e in embeds] if metadata is not None: df = pd.concat([df, metadata], axis=1) self._table.insert(df, warn=False) def add_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, batch_size: int = 32, **kwargs: Any, ) -> List[str]: """Run more texts through the embeddings and add to the vectorstore. Args: texts (Iterable[str]): Texts to add to the vectorstore. metadatas (Optional[List[dict]]): List of metadata corresponding to each chunk of text. ids (Optional[List[str]]): List of IDs corresponding to each chunk of text. batch_size (Optional[int]): Size of batch of chunks of text to insert at once. Returns: List[str]: List of IDs of the added texts. """ try: import pandas as pd except ImportError: raise ImportError( "Could not import pandas python package. " "Please install it with `pip install pandas`." 
            )
        texts = list(texts)
        metadf: Optional[pd.DataFrame] = None
        if metadatas is not None:
            if isinstance(metadatas, pd.DataFrame):
                metadf = metadatas
            else:
                metadf = pd.DataFrame(metadatas)
        out_ids: List[str] = []
        nbatches = (len(texts) - 1) // batch_size + 1
        for i in range(nbatches):
            istart = i * batch_size
            iend = (i + 1) * batch_size
            batch = texts[istart:iend]
            if ids:
                batch_ids = ids[istart:iend]
            else:
                batch_ids = [str(uuid.uuid4()) for _ in range(len(batch))]
            if metadf is not None:
                batch_meta = metadf.iloc[istart:iend].reset_index(drop=True)
            else:
                batch_meta = None
            self._insert(batch, batch_ids, batch_meta)
            out_ids = out_ids + batch_ids
        return out_ids

    def add_documents(
        self, documents: List[Document], batch_size: int = 32, **kwargs: Any
    ) -> List[str]:
        """Run more documents through the embeddings and add to the vectorstore.

        Args:
            documents (List[Document]): Documents to add to the vectorstore.
            batch_size (Optional[int]): Size of batch of documents to insert at once.

        Returns:
            List[str]: List of IDs of the added texts.
        """
        try:
            import pandas as pd
        except ImportError:
            raise ImportError(
                "Could not import pandas python package. "
                "Please install it with `pip install pandas`."
            )
        texts = [x.page_content for x in documents]
        metadata = pd.DataFrame([x.metadata for x in documents])
        return self.add_texts(texts, metadatas=metadata, batch_size=batch_size)

    def similarity_search_with_score(
        self,
        query: str,
        k: int = 1,
        filter: Optional[List] = [],
        **kwargs: Any,
    ) -> List[Tuple[Document, float]]:
        """Run similarity search with distance from a query string.

        Args:
            query (str): Query string.
            k (Optional[int]): number of neighbors to retrieve.
            filter (Optional[List]): KDB.AI metadata filter clause:
                https://code.kx.com/kdbai/use/filter.html

        Returns:
            List[Document]: List of similar documents.
        """
        return self.similarity_search_by_vector_with_score(
            self._embed_query(query), k=k, filter=filter, **kwargs
        )

    def similarity_search_by_vector_with_score(
        self,
        embedding: List[float],
        *,
        k: int = 1,
        filter: Optional[List] = [],
        **kwargs: Any,
    ) -> List[Tuple[Document, float]]:
        """Return documents most similar to embedding, along with scores.

        Args:
            embedding (List[float]): query vector.
            k (Optional[int]): number of neighbors to retrieve.
            filter (Optional[List]): KDB.AI metadata filter clause:
                https://code.kx.com/kdbai/use/filter.html

        Returns:
            List[Document]: List of similar documents.
        """
        if "n" in kwargs:
            k = kwargs.pop("n")
        matches = self._table.search(vectors=[embedding], n=k, filter=filter, **kwargs)
        docs: list = []
        if isinstance(matches, list):
            matches = matches[0]
        else:
            return docs
        for row in matches.to_dict(orient="records"):
            text = row.pop("text")
            score = row.pop("__nn_distance")
            docs.append(
                (
                    Document(
                        page_content=text,
                        metadata={key: v for key, v in row.items() if key != "text"},
                    ),
                    score,
                )
            )
        return docs

    def similarity_search(
        self,
        query: str,
        k: int = 1,
        filter: Optional[List] = [],
        **kwargs: Any,
    ) -> List[Document]:
        """Run similarity search from a query string.

        Args:
            query (str): Query string.
            k (Optional[int]): number of neighbors to retrieve.
            filter (Optional[List]): KDB.AI metadata filter clause:
                https://code.kx.com/kdbai/use/filter.html

        Returns:
            List[Document]: List of similar documents.
""" docs_and_scores = self.similarity_search_with_score( query, k=k, filter=filter, **kwargs ) return [doc for doc, _ in docs_and_scores] @classmethod def from_texts( cls: Any, texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, **kwargs: Any, ) -> Any: """Not implemented.""" raise Exception("Not implemented.")
0
lc_public_repos/langchain/libs/community/langchain_community
lc_public_repos/langchain/libs/community/langchain_community/vectorstores/mongodb_atlas.py
from __future__ import annotations import logging from typing import ( TYPE_CHECKING, Any, Callable, Dict, Generator, Iterable, List, Optional, Tuple, TypeVar, Union, ) import numpy as np from langchain_core._api.deprecation import deprecated from langchain_core.documents import Document from langchain_core.embeddings import Embeddings from langchain_core.vectorstores import VectorStore from langchain_community.vectorstores.utils import maximal_marginal_relevance if TYPE_CHECKING: from pymongo.collection import Collection MongoDBDocumentType = TypeVar("MongoDBDocumentType", bound=Dict[str, Any]) logger = logging.getLogger(__name__) DEFAULT_INSERT_BATCH_SIZE = 100 @deprecated( since="0.0.25", removal="1.0", alternative_import="langchain_mongodb.MongoDBAtlasVectorSearch", ) class MongoDBAtlasVectorSearch(VectorStore): """`MongoDB Atlas Vector Search` vector store. To use, you should have both: - the ``pymongo`` python package installed - a connection string associated with a MongoDB Atlas Cluster having deployed an Atlas Search index Example: .. code-block:: python from langchain_community.vectorstores import MongoDBAtlasVectorSearch from langchain_community.embeddings.openai import OpenAIEmbeddings from pymongo import MongoClient mongo_client = MongoClient("<YOUR-CONNECTION-STRING>") collection = mongo_client["<db_name>"]["<collection_name>"] embeddings = OpenAIEmbeddings() vectorstore = MongoDBAtlasVectorSearch(collection, embeddings) """ def __init__( self, collection: Collection[MongoDBDocumentType], embedding: Embeddings, *, index_name: str = "default", text_key: str = "text", embedding_key: str = "embedding", relevance_score_fn: str = "cosine", ): """ Args: collection: MongoDB collection to add the texts to. embedding: Text embedding model to use. text_key: MongoDB field that will contain the text for each document. embedding_key: MongoDB field that will contain the embedding for each document. index_name: Name of the Atlas Search index. relevance_score_fn: The similarity score used for the index. Currently supported: Euclidean, cosine, and dot product. """ self._collection = collection self._embedding = embedding self._index_name = index_name self._text_key = text_key self._embedding_key = embedding_key self._relevance_score_fn = relevance_score_fn @property def embeddings(self) -> Embeddings: return self._embedding def _select_relevance_score_fn(self) -> Callable[[float], float]: if self._relevance_score_fn == "euclidean": return self._euclidean_relevance_score_fn elif self._relevance_score_fn == "dotProduct": return self._max_inner_product_relevance_score_fn elif self._relevance_score_fn == "cosine": return self._cosine_relevance_score_fn else: raise NotImplementedError( f"No relevance score function for ${self._relevance_score_fn}" ) @classmethod def from_connection_string( cls, connection_string: str, namespace: str, embedding: Embeddings, **kwargs: Any, ) -> MongoDBAtlasVectorSearch: """Construct a `MongoDB Atlas Vector Search` vector store from a MongoDB connection URI. Args: connection_string: A valid MongoDB connection URI. namespace: A valid MongoDB namespace (database and collection). embedding: The text embedding model to use for the vector store. Returns: A new MongoDBAtlasVectorSearch instance. """ try: from importlib.metadata import version from pymongo import MongoClient from pymongo.driver_info import DriverInfo except ImportError: raise ImportError( "Could not import pymongo, please install it with " "`pip install pymongo`." 
) client: MongoClient = MongoClient( connection_string, driver=DriverInfo(name="Langchain", version=version("langchain")), ) db_name, collection_name = namespace.split(".") collection = client[db_name][collection_name] return cls(collection, embedding, **kwargs) def add_texts( self, texts: Iterable[str], metadatas: Optional[List[Dict[str, Any]]] = None, **kwargs: Any, ) -> List: """Run more texts through the embeddings and add to the vectorstore. Args: texts: Iterable of strings to add to the vectorstore. metadatas: Optional list of metadatas associated with the texts. Returns: List of ids from adding the texts into the vectorstore. """ batch_size = kwargs.get("batch_size", DEFAULT_INSERT_BATCH_SIZE) _metadatas: Union[List, Generator] = metadatas or ({} for _ in texts) texts_batch = [] metadatas_batch = [] result_ids = [] for i, (text, metadata) in enumerate(zip(texts, _metadatas)): texts_batch.append(text) metadatas_batch.append(metadata) if (i + 1) % batch_size == 0: result_ids.extend(self._insert_texts(texts_batch, metadatas_batch)) texts_batch = [] metadatas_batch = [] if texts_batch: result_ids.extend(self._insert_texts(texts_batch, metadatas_batch)) return result_ids def _insert_texts(self, texts: List[str], metadatas: List[Dict[str, Any]]) -> List: if not texts: return [] # Embed and create the documents embeddings = self._embedding.embed_documents(texts) to_insert = [ {self._text_key: t, self._embedding_key: embedding, **m} for t, m, embedding in zip(texts, metadatas, embeddings) ] # insert the documents in MongoDB Atlas insert_result = self._collection.insert_many(to_insert) # type: ignore return insert_result.inserted_ids def _similarity_search_with_score( self, embedding: List[float], k: int = 4, pre_filter: Optional[Dict] = None, post_filter_pipeline: Optional[List[Dict]] = None, ) -> List[Tuple[Document, float]]: params = { "queryVector": embedding, "path": self._embedding_key, "numCandidates": k * 10, "limit": k, "index": self._index_name, } if pre_filter: params["filter"] = pre_filter query = {"$vectorSearch": params} pipeline = [ query, {"$set": {"score": {"$meta": "vectorSearchScore"}}}, ] if post_filter_pipeline is not None: pipeline.extend(post_filter_pipeline) cursor = self._collection.aggregate(pipeline) # type: ignore[arg-type] docs = [] for res in cursor: text = res.pop(self._text_key) score = res.pop("score") docs.append((Document(page_content=text, metadata=res), score)) return docs def similarity_search_with_score( self, query: str, k: int = 4, pre_filter: Optional[Dict] = None, post_filter_pipeline: Optional[List[Dict]] = None, ) -> List[Tuple[Document, float]]: """Return MongoDB documents most similar to the given query and their scores. Uses the vectorSearch operator available in MongoDB Atlas Search. For more: https://www.mongodb.com/docs/atlas/atlas-vector-search/vector-search-stage/ Args: query: Text to look up documents similar to. k: (Optional) number of documents to return. Defaults to 4. pre_filter: (Optional) dictionary of argument(s) to prefilter document fields on. post_filter_pipeline: (Optional) Pipeline of MongoDB aggregation stages following the vectorSearch stage. Returns: List of documents most similar to the query and their scores. 
""" embedding = self._embedding.embed_query(query) docs = self._similarity_search_with_score( embedding, k=k, pre_filter=pre_filter, post_filter_pipeline=post_filter_pipeline, ) return docs def similarity_search( self, query: str, k: int = 4, pre_filter: Optional[Dict] = None, post_filter_pipeline: Optional[List[Dict]] = None, **kwargs: Any, ) -> List[Document]: """Return MongoDB documents most similar to the given query. Uses the vectorSearch operator available in MongoDB Atlas Search. For more: https://www.mongodb.com/docs/atlas/atlas-vector-search/vector-search-stage/ Args: query: Text to look up documents similar to. k: (Optional) number of documents to return. Defaults to 4. pre_filter: (Optional) dictionary of argument(s) to prefilter document fields on. post_filter_pipeline: (Optional) Pipeline of MongoDB aggregation stages following the vectorSearch stage. Returns: List of documents most similar to the query and their scores. """ additional = kwargs.get("additional") docs_and_scores = self.similarity_search_with_score( query, k=k, pre_filter=pre_filter, post_filter_pipeline=post_filter_pipeline, ) if additional and "similarity_score" in additional: for doc, score in docs_and_scores: doc.metadata["score"] = score return [doc for doc, _ in docs_and_scores] def max_marginal_relevance_search( self, query: str, k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, pre_filter: Optional[Dict] = None, post_filter_pipeline: Optional[List[Dict]] = None, **kwargs: Any, ) -> List[Document]: """Return documents selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: query: Text to look up documents similar to. k: (Optional) number of documents to return. Defaults to 4. fetch_k: (Optional) number of documents to fetch before passing to MMR algorithm. Defaults to 20. lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. pre_filter: (Optional) dictionary of argument(s) to prefilter on document fields. post_filter_pipeline: (Optional) pipeline of MongoDB aggregation stages following the vectorSearch stage. Returns: List of documents selected by maximal marginal relevance. """ query_embedding = self._embedding.embed_query(query) docs = self._similarity_search_with_score( query_embedding, k=fetch_k, pre_filter=pre_filter, post_filter_pipeline=post_filter_pipeline, ) mmr_doc_indexes = maximal_marginal_relevance( np.array(query_embedding), [doc.metadata[self._embedding_key] for doc, _ in docs], k=k, lambda_mult=lambda_mult, ) mmr_docs = [docs[i][0] for i in mmr_doc_indexes] return mmr_docs @classmethod def from_texts( cls, texts: List[str], embedding: Embeddings, metadatas: Optional[List[Dict]] = None, collection: Optional[Collection[MongoDBDocumentType]] = None, **kwargs: Any, ) -> MongoDBAtlasVectorSearch: """Construct a `MongoDB Atlas Vector Search` vector store from raw documents. This is a user-friendly interface that: 1. Embeds documents. 2. Adds the documents to a provided MongoDB Atlas Vector Search index (Lucene) This is intended to be a quick way to get started. Example: .. 
code-block:: python from pymongo import MongoClient from langchain_community.vectorstores import MongoDBAtlasVectorSearch from langchain_community.embeddings import OpenAIEmbeddings mongo_client = MongoClient("<YOUR-CONNECTION-STRING>") collection = mongo_client["<db_name>"]["<collection_name>"] embeddings = OpenAIEmbeddings() vectorstore = MongoDBAtlasVectorSearch.from_texts( texts, embeddings, metadatas=metadatas, collection=collection ) """ if collection is None: raise ValueError("Must provide 'collection' named parameter.") vectorstore = cls(collection, embedding, **kwargs) vectorstore.add_texts(texts, metadatas=metadatas) return vectorstore
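
# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the library API above).
# Assumes a MongoDB Atlas cluster with an Atlas Vector Search index named
# "default" on the target collection; the connection string, namespace and
# the "page" filter field below are placeholders.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from langchain_community.embeddings import FakeEmbeddings

    vectorstore = MongoDBAtlasVectorSearch.from_connection_string(
        "mongodb+srv://USER:PASS@CLUSTER.mongodb.net",  # placeholder URI
        "demo_db.demo_collection",  # "<db>.<collection>" namespace
        FakeEmbeddings(size=1536),
    )

    vectorstore.add_texts(
        ["Atlas stores documents", "Vector search finds neighbours"],
        metadatas=[{"page": 1}, {"page": 2}],
    )

    # `pre_filter` is passed through as the $vectorSearch `filter` clause, so
    # it uses MQL operators on metadata fields; the field must be declared as
    # a filter field in the Atlas index definition for this to apply.
    docs = vectorstore.similarity_search(
        "nearest neighbours",
        k=1,
        pre_filter={"page": {"$eq": 2}},
    )
    print(docs[0].page_content)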
0
lc_public_repos/langchain/libs/community/langchain_community
lc_public_repos/langchain/libs/community/langchain_community/vectorstores/pgvector.py
from __future__ import annotations import contextlib import enum import json import logging import uuid from typing import ( Any, Callable, Dict, Generator, Iterable, List, Optional, Tuple, Type, ) import numpy as np import sqlalchemy from langchain_core._api import deprecated, warn_deprecated from sqlalchemy import delete, func from sqlalchemy.dialects.postgresql import JSON, JSONB, UUID from sqlalchemy.orm import Session, relationship try: from sqlalchemy.orm import declarative_base except ImportError: from sqlalchemy.ext.declarative import declarative_base try: from sqlalchemy import SQLColumnExpression except ImportError: # for sqlalchemy < 2 SQLColumnExpression = Any # type: ignore from langchain_core.documents import Document from langchain_core.embeddings import Embeddings from langchain_core.runnables.config import run_in_executor from langchain_core.utils import get_from_dict_or_env from langchain_core.vectorstores import VectorStore from langchain_community.vectorstores.utils import maximal_marginal_relevance class DistanceStrategy(str, enum.Enum): """Enumerator of the Distance strategies.""" EUCLIDEAN = "l2" COSINE = "cosine" MAX_INNER_PRODUCT = "inner" DEFAULT_DISTANCE_STRATEGY = DistanceStrategy.COSINE Base = declarative_base() # type: Any _LANGCHAIN_DEFAULT_COLLECTION_NAME = "langchain" class BaseModel(Base): """Base model for the SQL stores.""" __abstract__ = True uuid = sqlalchemy.Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4) _classes: Any = None COMPARISONS_TO_NATIVE = { "$eq": "==", "$ne": "!=", "$lt": "<", "$lte": "<=", "$gt": ">", "$gte": ">=", } SPECIAL_CASED_OPERATORS = { "$in", "$nin", "$between", } TEXT_OPERATORS = { "$like", "$ilike", } LOGICAL_OPERATORS = {"$and", "$or"} SUPPORTED_OPERATORS = ( set(COMPARISONS_TO_NATIVE) .union(TEXT_OPERATORS) .union(LOGICAL_OPERATORS) .union(SPECIAL_CASED_OPERATORS) ) def _get_embedding_collection_store( vector_dimension: Optional[int] = None, *, use_jsonb: bool = True ) -> Any: global _classes if _classes is not None: return _classes from pgvector.sqlalchemy import Vector class CollectionStore(BaseModel): """Collection store.""" __tablename__ = "langchain_pg_collection" name = sqlalchemy.Column(sqlalchemy.String) cmetadata = sqlalchemy.Column(JSON) embeddings = relationship( "EmbeddingStore", back_populates="collection", passive_deletes=True, ) @classmethod def get_by_name( cls, session: Session, name: str ) -> Optional["CollectionStore"]: return session.query(cls).filter(cls.name == name).first() # type: ignore @classmethod def get_or_create( cls, session: Session, name: str, cmetadata: Optional[dict] = None, ) -> Tuple["CollectionStore", bool]: """ Get or create a collection. Returns [Collection, bool] where the bool is True if the collection was created. 
""" # noqa: E501 created = False collection = cls.get_by_name(session, name) if collection: return collection, created collection = cls(name=name, cmetadata=cmetadata) session.add(collection) session.commit() created = True return collection, created if use_jsonb: # TODO(PRIOR TO LANDING): Create a gin index on the cmetadata field class EmbeddingStore(BaseModel): """Embedding store.""" __tablename__ = "langchain_pg_embedding" collection_id = sqlalchemy.Column( UUID(as_uuid=True), sqlalchemy.ForeignKey( f"{CollectionStore.__tablename__}.uuid", ondelete="CASCADE", ), ) collection = relationship(CollectionStore, back_populates="embeddings") embedding: Vector = sqlalchemy.Column(Vector(vector_dimension)) document = sqlalchemy.Column(sqlalchemy.String, nullable=True) cmetadata = sqlalchemy.Column(JSONB, nullable=True) # custom_id : any user defined id custom_id = sqlalchemy.Column(sqlalchemy.String, nullable=True) __table_args__ = ( sqlalchemy.Index( "ix_cmetadata_gin", "cmetadata", postgresql_using="gin", postgresql_ops={"cmetadata": "jsonb_path_ops"}, ), ) else: # For backwards comaptibilty with older versions of pgvector # This should be removed in the future (remove during migration) class EmbeddingStore(BaseModel): # type: ignore[no-redef] """Embedding store.""" __tablename__ = "langchain_pg_embedding" collection_id = sqlalchemy.Column( UUID(as_uuid=True), sqlalchemy.ForeignKey( f"{CollectionStore.__tablename__}.uuid", ondelete="CASCADE", ), ) collection = relationship(CollectionStore, back_populates="embeddings") embedding: Vector = sqlalchemy.Column(Vector(vector_dimension)) document = sqlalchemy.Column(sqlalchemy.String, nullable=True) cmetadata = sqlalchemy.Column(JSON, nullable=True) # custom_id : any user defined id custom_id = sqlalchemy.Column(sqlalchemy.String, nullable=True) _classes = (EmbeddingStore, CollectionStore) return _classes def _results_to_docs(docs_and_scores: Any) -> List[Document]: """Return docs from docs and scores.""" return [doc for doc, _ in docs_and_scores] @deprecated( since="0.0.31", message=( "This class is pending deprecation and may be removed in a future version. " "You can swap to using the `PGVector` " "implementation in `langchain_postgres`. " "Please read the guidelines in the doc-string of this class " "to follow prior to migrating as there are some differences " "between the implementations. " "See <https://github.com/langchain-ai/langchain-postgres> for details about " "the new implementation." ), alternative="from langchain_postgres import PGVector;", pending=True, ) class PGVector(VectorStore): """`Postgres`/`PGVector` vector store. **DEPRECATED**: This class is pending deprecation and will likely receive no updates. An improved version of this class is available in `langchain_postgres` as `PGVector`. Please use that class instead. When migrating please keep in mind that: * The new implementation works with psycopg3, not with psycopg2 (This implementation does not work with psycopg3). * Filtering syntax has changed to use $ prefixed operators for JSONB metadata fields. (New implementation only uses JSONB field for metadata) * The new implementation made some schema changes to address issues with the existing implementation. So you will need to re-create your tables and re-index your data or else carry out a manual migration. To use, you should have the ``pgvector`` python package installed. Args: connection_string: Postgres connection string. 
        embedding_function: Any embedding function implementing
            `langchain.embeddings.base.Embeddings` interface.
        embedding_length: The length of the embedding vector. (default: None)
            NOTE: This is not mandatory. Defining it will prevent vectors of
            any other size from being added to the embeddings table but,
            without it, the embeddings can't be indexed.
        collection_name: The name of the collection to use. (default: langchain)
            NOTE: This is not the name of the table, but the name of the collection.
            The tables will be created when initializing the store (if not exists)
            So, make sure the user has the right permissions to create tables.
        distance_strategy: The distance strategy to use. (default: COSINE)
        pre_delete_collection: If True, will delete the collection if it exists.
            (default: False). Useful for testing.
        engine_args: SQLAlchemy's create engine arguments.
        use_jsonb: Use JSONB instead of JSON for metadata. (default: True)
            Strongly discouraged from using JSON as it's not as efficient
            for querying.
            It's provided here for backwards compatibility with older versions,
            and will be removed in the future.
        create_extension: If True, will create the vector extension if it
            doesn't exist. Disabling creation is useful when using ReadOnly
            Databases.

    Example:
        .. code-block:: python

            from langchain_community.vectorstores import PGVector
            from langchain_community.embeddings.openai import OpenAIEmbeddings

            CONNECTION_STRING = "postgresql+psycopg2://hwc@localhost:5432/test3"
            COLLECTION_NAME = "state_of_the_union_test"
            embeddings = OpenAIEmbeddings()
            vectorstore = PGVector.from_documents(
                embedding=embeddings,
                documents=docs,
                collection_name=COLLECTION_NAME,
                connection_string=CONNECTION_STRING,
                use_jsonb=True,
            )

    """  # noqa: E501

    def __init__(
        self,
        connection_string: str,
        embedding_function: Embeddings,
        embedding_length: Optional[int] = None,
        collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME,
        collection_metadata: Optional[dict] = None,
        distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY,
        pre_delete_collection: bool = False,
        logger: Optional[logging.Logger] = None,
        relevance_score_fn: Optional[Callable[[float], float]] = None,
        *,
        connection: Optional[sqlalchemy.engine.Connection] = None,
        engine_args: Optional[dict[str, Any]] = None,
        use_jsonb: bool = False,
        create_extension: bool = True,
    ) -> None:
        """Initialize the PGVector store."""
        self.connection_string = connection_string
        self.embedding_function = embedding_function
        self._embedding_length = embedding_length
        self.collection_name = collection_name
        self.collection_metadata = collection_metadata
        self._distance_strategy = distance_strategy
        self.pre_delete_collection = pre_delete_collection
        self.logger = logger or logging.getLogger(__name__)
        self.override_relevance_score_fn = relevance_score_fn
        self.engine_args = engine_args or {}
        self._bind = connection if connection else self._create_engine()
        self.use_jsonb = use_jsonb
        self.create_extension = create_extension

        if not use_jsonb:
            # Replace with a deprecation warning.
            warn_deprecated(
                "0.0.29",
                pending=True,
                message=(
                    "Please use JSONB instead of JSON for metadata. "
                    "This change will allow for more efficient querying that "
                    "involves filtering based on metadata. "
                    "Please note that filtering operators have been changed "
                    "when using JSONB metadata to be prefixed with a $ sign "
                    "to avoid name collisions with columns. "
                    "If you're using an existing database, you will need to create a "
                    "db migration for your metadata column to be JSONB and update your "
                    "queries to use the new operators. 
" ), alternative=( "Instantiate with use_jsonb=True to use JSONB instead " "of JSON for metadata." ), ) self.__post_init__() def __post_init__( self, ) -> None: """Initialize the store.""" if self.create_extension: self.create_vector_extension() EmbeddingStore, CollectionStore = _get_embedding_collection_store( self._embedding_length, use_jsonb=self.use_jsonb ) self.CollectionStore = CollectionStore self.EmbeddingStore = EmbeddingStore self.create_tables_if_not_exists() self.create_collection() def __del__(self) -> None: if isinstance(self._bind, sqlalchemy.engine.Connection): self._bind.close() @property def embeddings(self) -> Embeddings: return self.embedding_function def _create_engine(self) -> sqlalchemy.engine.Engine: return sqlalchemy.create_engine(url=self.connection_string, **self.engine_args) def create_vector_extension(self) -> None: try: with Session(self._bind) as session: # type: ignore[arg-type] # The advisor lock fixes issue arising from concurrent # creation of the vector extension. # https://github.com/langchain-ai/langchain/issues/12933 # For more information see: # https://www.postgresql.org/docs/16/explicit-locking.html#ADVISORY-LOCKS statement = sqlalchemy.text( "BEGIN;" "SELECT pg_advisory_xact_lock(1573678846307946496);" "CREATE EXTENSION IF NOT EXISTS vector;" "COMMIT;" ) session.execute(statement) session.commit() except Exception as e: raise Exception(f"Failed to create vector extension: {e}") from e def create_tables_if_not_exists(self) -> None: with Session(self._bind) as session, session.begin(): # type: ignore[arg-type] Base.metadata.create_all(session.get_bind()) def drop_tables(self) -> None: with Session(self._bind) as session, session.begin(): # type: ignore[arg-type] Base.metadata.drop_all(session.get_bind()) def create_collection(self) -> None: if self.pre_delete_collection: self.delete_collection() with Session(self._bind) as session: # type: ignore[arg-type] self.CollectionStore.get_or_create( session, self.collection_name, cmetadata=self.collection_metadata ) def delete_collection(self) -> None: self.logger.debug("Trying to delete collection") with Session(self._bind) as session: # type: ignore[arg-type] collection = self.get_collection(session) if not collection: self.logger.warning("Collection not found") return session.delete(collection) session.commit() @contextlib.contextmanager def _make_session(self) -> Generator[Session, None, None]: """Create a context manager for the session, bind to _conn string.""" yield Session(self._bind) # type: ignore[arg-type] def delete( self, ids: Optional[List[str]] = None, collection_only: bool = False, **kwargs: Any, ) -> None: """Delete vectors by ids or uuids. Args: ids: List of ids to delete. collection_only: Only delete ids in the collection. 
""" with Session(self._bind) as session: # type: ignore[arg-type] if ids is not None: self.logger.debug( "Trying to delete vectors by ids (represented by the model " "using the custom ids field)" ) stmt = delete(self.EmbeddingStore) if collection_only: collection = self.get_collection(session) if not collection: self.logger.warning("Collection not found") return stmt = stmt.where( self.EmbeddingStore.collection_id == collection.uuid ) stmt = stmt.where(self.EmbeddingStore.custom_id.in_(ids)) session.execute(stmt) session.commit() def get_collection(self, session: Session) -> Any: return self.CollectionStore.get_by_name(session, self.collection_name) @classmethod def _from( cls, texts: List[str], embeddings: List[List[float]], embedding: Embeddings, metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME, distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY, connection_string: Optional[str] = None, pre_delete_collection: bool = False, *, use_jsonb: bool = False, **kwargs: Any, ) -> PGVector: if ids is None: ids = [str(uuid.uuid4()) for _ in texts] if not metadatas: metadatas = [{} for _ in texts] if connection_string is None: connection_string = cls.get_connection_string(kwargs) store = cls( connection_string=connection_string, collection_name=collection_name, embedding_function=embedding, distance_strategy=distance_strategy, pre_delete_collection=pre_delete_collection, use_jsonb=use_jsonb, **kwargs, ) store.add_embeddings( texts=texts, embeddings=embeddings, metadatas=metadatas, ids=ids, **kwargs ) return store def add_embeddings( self, texts: Iterable[str], embeddings: List[List[float]], metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, **kwargs: Any, ) -> List[str]: """Add embeddings to the vectorstore. Args: texts: Iterable of strings to add to the vectorstore. embeddings: List of list of embedding vectors. metadatas: List of metadatas associated with the texts. kwargs: vectorstore specific parameters """ if ids is None: ids = [str(uuid.uuid4()) for _ in texts] if not metadatas: metadatas = [{} for _ in texts] with Session(self._bind) as session: # type: ignore[arg-type] collection = self.get_collection(session) if not collection: raise ValueError("Collection not found") documents = [] for text, metadata, embedding, id in zip(texts, metadatas, embeddings, ids): embedding_store = self.EmbeddingStore( embedding=embedding, document=text, cmetadata=metadata, custom_id=id, collection_id=collection.uuid, ) documents.append(embedding_store) session.bulk_save_objects(documents) session.commit() return ids def add_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, **kwargs: Any, ) -> List[str]: """Run more texts through the embeddings and add to the vectorstore. Args: texts: Iterable of strings to add to the vectorstore. metadatas: Optional list of metadatas associated with the texts. kwargs: vectorstore specific parameters Returns: List of ids from adding the texts into the vectorstore. """ embeddings = self.embedding_function.embed_documents(list(texts)) return self.add_embeddings( texts=texts, embeddings=embeddings, metadatas=metadatas, ids=ids, **kwargs ) def similarity_search( self, query: str, k: int = 4, filter: Optional[dict] = None, **kwargs: Any, ) -> List[Document]: """Run similarity search with PGVector with distance. Args: query (str): Query text to search for. k (int): Number of results to return. Defaults to 4. 
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. Returns: List of Documents most similar to the query. """ embedding = self.embedding_function.embed_query(text=query) return self.similarity_search_by_vector( embedding=embedding, k=k, filter=filter, ) def similarity_search_with_score( self, query: str, k: int = 4, filter: Optional[dict] = None, ) -> List[Tuple[Document, float]]: """Return docs most similar to query. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. Returns: List of Documents most similar to the query and score for each. """ embedding = self.embedding_function.embed_query(query) docs = self.similarity_search_with_score_by_vector( embedding=embedding, k=k, filter=filter ) return docs @property def distance_strategy(self) -> Any: if self._distance_strategy == DistanceStrategy.EUCLIDEAN: return self.EmbeddingStore.embedding.l2_distance elif self._distance_strategy == DistanceStrategy.COSINE: return self.EmbeddingStore.embedding.cosine_distance elif self._distance_strategy == DistanceStrategy.MAX_INNER_PRODUCT: return self.EmbeddingStore.embedding.max_inner_product else: raise ValueError( f"Got unexpected value for distance: {self._distance_strategy}. " f"Should be one of {', '.join([ds.value for ds in DistanceStrategy])}." ) def similarity_search_with_score_by_vector( self, embedding: List[float], k: int = 4, filter: Optional[dict] = None, ) -> List[Tuple[Document, float]]: results = self._query_collection(embedding=embedding, k=k, filter=filter) return self._results_to_docs_and_scores(results) def _results_to_docs_and_scores(self, results: Any) -> List[Tuple[Document, float]]: """Return docs and scores from results.""" docs = [ ( Document( page_content=result.EmbeddingStore.document, metadata=result.EmbeddingStore.cmetadata, ), result.distance if self.embedding_function is not None else None, ) for result in results ] return docs def _handle_field_filter( self, field: str, value: Any, ) -> SQLColumnExpression: """Create a filter for a specific field. Args: field: name of field value: value to filter If provided as is then this will be an equality filter If provided as a dictionary then this will be a filter, the key will be the operator and the value will be the value to filter by Returns: sqlalchemy expression """ if not isinstance(field, str): raise ValueError( f"field should be a string but got: {type(field)} with value: {field}" ) if field.startswith("$"): raise ValueError( f"Invalid filter condition. Expected a field but got an operator: " f"{field}" ) # Allow [a-zA-Z0-9_], disallow $ for now until we support escape characters if not field.isidentifier(): raise ValueError( f"Invalid field name: {field}. Expected a valid identifier." ) if isinstance(value, dict): # This is a filter specification if len(value) != 1: raise ValueError( "Invalid filter condition. Expected a value which " "is a dictionary with a single key that corresponds to an operator " f"but got a dictionary with {len(value)} keys. The first few " f"keys are: {list(value.keys())[:3]}" ) operator, filter_value = list(value.items())[0] # Verify that that operator is an operator if operator not in SUPPORTED_OPERATORS: raise ValueError( f"Invalid operator: {operator}. 
" f"Expected one of {SUPPORTED_OPERATORS}" ) else: # Then we assume an equality operator operator = "$eq" filter_value = value if operator in COMPARISONS_TO_NATIVE: # Then we implement an equality filter # native is trusted input native = COMPARISONS_TO_NATIVE[operator] return func.jsonb_path_match( self.EmbeddingStore.cmetadata, f"$.{field} {native} $value", json.dumps({"value": filter_value}), ) elif operator == "$between": # Use AND with two comparisons low, high = filter_value lower_bound = func.jsonb_path_match( self.EmbeddingStore.cmetadata, f"$.{field} >= $value", json.dumps({"value": low}), ) upper_bound = func.jsonb_path_match( self.EmbeddingStore.cmetadata, f"$.{field} <= $value", json.dumps({"value": high}), ) return sqlalchemy.and_(lower_bound, upper_bound) elif operator in {"$in", "$nin", "$like", "$ilike"}: # We'll do force coercion to text if operator in {"$in", "$nin"}: for val in filter_value: if not isinstance(val, (str, int, float)): raise NotImplementedError( f"Unsupported type: {type(val)} for value: {val}" ) queried_field = self.EmbeddingStore.cmetadata[field].astext if operator in {"$in"}: return queried_field.in_([str(val) for val in filter_value]) elif operator in {"$nin"}: return queried_field.not_in([str(val) for val in filter_value]) elif operator in {"$like"}: return queried_field.like(filter_value) elif operator in {"$ilike"}: return queried_field.ilike(filter_value) else: raise NotImplementedError() else: raise NotImplementedError() def _create_filter_clause_deprecated(self, key, value): # type: ignore[no-untyped-def] """Deprecated functionality. This is for backwards compatibility with the JSON based schema for metadata. It uses incorrect operator syntax (operators are not prefixed with $). This implementation is not efficient, and has bugs associated with the way that it handles numeric filter clauses. 
""" IN, NIN, BETWEEN, GT, LT, NE = "in", "nin", "between", "gt", "lt", "ne" EQ, LIKE, CONTAINS, OR, AND = "eq", "like", "contains", "or", "and" value_case_insensitive = {k.lower(): v for k, v in value.items()} if IN in map(str.lower, value): filter_by_metadata = self.EmbeddingStore.cmetadata[key].astext.in_( value_case_insensitive[IN] ) elif NIN in map(str.lower, value): filter_by_metadata = self.EmbeddingStore.cmetadata[key].astext.not_in( value_case_insensitive[NIN] ) elif BETWEEN in map(str.lower, value): filter_by_metadata = self.EmbeddingStore.cmetadata[key].astext.between( str(value_case_insensitive[BETWEEN][0]), str(value_case_insensitive[BETWEEN][1]), ) elif GT in map(str.lower, value): filter_by_metadata = self.EmbeddingStore.cmetadata[key].astext > str( value_case_insensitive[GT] ) elif LT in map(str.lower, value): filter_by_metadata = self.EmbeddingStore.cmetadata[key].astext < str( value_case_insensitive[LT] ) elif NE in map(str.lower, value): filter_by_metadata = self.EmbeddingStore.cmetadata[key].astext != str( value_case_insensitive[NE] ) elif EQ in map(str.lower, value): filter_by_metadata = self.EmbeddingStore.cmetadata[key].astext == str( value_case_insensitive[EQ] ) elif LIKE in map(str.lower, value): filter_by_metadata = self.EmbeddingStore.cmetadata[key].astext.like( value_case_insensitive[LIKE] ) elif CONTAINS in map(str.lower, value): filter_by_metadata = self.EmbeddingStore.cmetadata[key].astext.contains( value_case_insensitive[CONTAINS] ) elif OR in map(str.lower, value): or_clauses = [ self._create_filter_clause_deprecated(key, sub_value) for sub_value in value_case_insensitive[OR] ] filter_by_metadata = sqlalchemy.or_(*or_clauses) elif AND in map(str.lower, value): and_clauses = [ self._create_filter_clause_deprecated(key, sub_value) for sub_value in value_case_insensitive[AND] ] filter_by_metadata = sqlalchemy.and_(*and_clauses) else: filter_by_metadata = None return filter_by_metadata def _create_filter_clause_json_deprecated( self, filter: Any ) -> List[SQLColumnExpression]: """Convert filters from IR to SQL clauses. **DEPRECATED** This functionality will be deprecated in the future. It implements translation of filters for a schema that uses JSON for metadata rather than the JSONB field which is more efficient for querying. """ filter_clauses = [] for key, value in filter.items(): if isinstance(value, dict): filter_by_metadata = self._create_filter_clause_deprecated(key, value) if filter_by_metadata is not None: filter_clauses.append(filter_by_metadata) else: filter_by_metadata = self.EmbeddingStore.cmetadata[key].astext == str( value ) filter_clauses.append(filter_by_metadata) return filter_clauses def _create_filter_clause(self, filters: Any) -> Any: """Convert LangChain IR filter representation to matching SQLAlchemy clauses. At the top level, we still don't know if we're working with a field or an operator for the keys. After we've determined that we can call the appropriate logic to handle filter creation. Args: filters: Dictionary of filters to apply to the query. Returns: SQLAlchemy clause to apply to the query. """ if isinstance(filters, dict): if len(filters) == 1: # The only operators allowed at the top level are $AND and $OR # First check if an operator or a field key, value = list(filters.items())[0] if key.startswith("$"): # Then it's an operator if key.lower() not in ["$and", "$or"]: raise ValueError( f"Invalid filter condition. 
Expected $and or $or " f"but got: {key}" ) else: # Then it's a field return self._handle_field_filter(key, filters[key]) # Here we handle the $and and $or operators if not isinstance(value, list): raise ValueError( f"Expected a list, but got {type(value)} for value: {value}" ) if key.lower() == "$and": and_ = [self._create_filter_clause(el) for el in value] if len(and_) > 1: return sqlalchemy.and_(*and_) elif len(and_) == 1: return and_[0] else: raise ValueError( "Invalid filter condition. Expected a dictionary " "but got an empty dictionary" ) elif key.lower() == "$or": or_ = [self._create_filter_clause(el) for el in value] if len(or_) > 1: return sqlalchemy.or_(*or_) elif len(or_) == 1: return or_[0] else: raise ValueError( "Invalid filter condition. Expected a dictionary " "but got an empty dictionary" ) else: raise ValueError( f"Invalid filter condition. Expected $and or $or " f"but got: {key}" ) elif len(filters) > 1: # Then all keys have to be fields (they cannot be operators) for key in filters.keys(): if key.startswith("$"): raise ValueError( f"Invalid filter condition. Expected a field but got: {key}" ) # These should all be fields and combined using an $and operator and_ = [self._handle_field_filter(k, v) for k, v in filters.items()] if len(and_) > 1: return sqlalchemy.and_(*and_) elif len(and_) == 1: return and_[0] else: raise ValueError( "Invalid filter condition. Expected a dictionary " "but got an empty dictionary" ) else: raise ValueError("Got an empty dictionary for filters.") else: raise ValueError( f"Invalid type: Expected a dictionary but got type: {type(filters)}" ) def _query_collection( self, embedding: List[float], k: int = 4, filter: Optional[Dict[str, str]] = None, ) -> List[Any]: """Query the collection.""" with Session(self._bind) as session: # type: ignore[arg-type] collection = self.get_collection(session) if not collection: raise ValueError("Collection not found") filter_by = [self.EmbeddingStore.collection_id == collection.uuid] if filter: if self.use_jsonb: filter_clauses = self._create_filter_clause(filter) if filter_clauses is not None: filter_by.append(filter_clauses) else: # Old way of doing things filter_clauses = self._create_filter_clause_json_deprecated(filter) filter_by.extend(filter_clauses) _type = self.EmbeddingStore results: List[Any] = ( session.query( self.EmbeddingStore, self.distance_strategy(embedding).label("distance"), # type: ignore ) .filter(*filter_by) .order_by(sqlalchemy.asc("distance")) .join( self.CollectionStore, self.EmbeddingStore.collection_id == self.CollectionStore.uuid, ) .limit(k) .all() ) return results def similarity_search_by_vector( self, embedding: List[float], k: int = 4, filter: Optional[dict] = None, **kwargs: Any, ) -> List[Document]: """Return docs most similar to embedding vector. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. Returns: List of Documents most similar to the query vector. 
""" docs_and_scores = self.similarity_search_with_score_by_vector( embedding=embedding, k=k, filter=filter ) return _results_to_docs(docs_and_scores) @classmethod def from_texts( cls: Type[PGVector], texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME, distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY, ids: Optional[List[str]] = None, pre_delete_collection: bool = False, *, use_jsonb: bool = False, **kwargs: Any, ) -> PGVector: """ Return VectorStore initialized from texts and embeddings. Postgres connection string is required "Either pass it as a parameter or set the PGVECTOR_CONNECTION_STRING environment variable. """ embeddings = embedding.embed_documents(list(texts)) return cls._from( texts, embeddings, embedding, metadatas=metadatas, ids=ids, collection_name=collection_name, distance_strategy=distance_strategy, pre_delete_collection=pre_delete_collection, use_jsonb=use_jsonb, **kwargs, ) @classmethod def from_embeddings( cls, text_embeddings: List[Tuple[str, List[float]]], embedding: Embeddings, metadatas: Optional[List[dict]] = None, collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME, distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY, ids: Optional[List[str]] = None, pre_delete_collection: bool = False, **kwargs: Any, ) -> PGVector: """Construct PGVector wrapper from raw documents and pre- generated embeddings. Return VectorStore initialized from documents and embeddings. Postgres connection string is required "Either pass it as a parameter or set the PGVECTOR_CONNECTION_STRING environment variable. Example: .. code-block:: python from langchain_community.vectorstores import PGVector from langchain_community.embeddings import OpenAIEmbeddings embeddings = OpenAIEmbeddings() text_embeddings = embeddings.embed_documents(texts) text_embedding_pairs = list(zip(texts, text_embeddings)) faiss = PGVector.from_embeddings(text_embedding_pairs, embeddings) """ texts = [t[0] for t in text_embeddings] embeddings = [t[1] for t in text_embeddings] return cls._from( texts, embeddings, embedding, metadatas=metadatas, ids=ids, collection_name=collection_name, distance_strategy=distance_strategy, pre_delete_collection=pre_delete_collection, **kwargs, ) @classmethod def from_existing_index( cls: Type[PGVector], embedding: Embeddings, collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME, distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY, pre_delete_collection: bool = False, **kwargs: Any, ) -> PGVector: """ Get instance of an existing PGVector store.This method will return the instance of the store without inserting any new embeddings """ connection_string = cls.get_connection_string(kwargs) store = cls( connection_string=connection_string, collection_name=collection_name, embedding_function=embedding, distance_strategy=distance_strategy, pre_delete_collection=pre_delete_collection, ) return store @classmethod def get_connection_string(cls, kwargs: Dict[str, Any]) -> str: connection_string: str = get_from_dict_or_env( data=kwargs, key="connection_string", env_key="PGVECTOR_CONNECTION_STRING", ) if not connection_string: raise ValueError( "Postgres connection string is required" "Either pass it as a parameter" "or set the PGVECTOR_CONNECTION_STRING environment variable." 
            )

        return connection_string

    @classmethod
    def from_documents(
        cls: Type[PGVector],
        documents: List[Document],
        embedding: Embeddings,
        collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME,
        distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY,
        ids: Optional[List[str]] = None,
        pre_delete_collection: bool = False,
        *,
        use_jsonb: bool = False,
        **kwargs: Any,
    ) -> PGVector:
        """Return VectorStore initialized from documents and embeddings.

        A Postgres connection string is required: either pass it as a
        parameter or set the PGVECTOR_CONNECTION_STRING environment variable.
        """
        texts = [d.page_content for d in documents]
        metadatas = [d.metadata for d in documents]
        connection_string = cls.get_connection_string(kwargs)

        kwargs["connection_string"] = connection_string

        return cls.from_texts(
            texts=texts,
            pre_delete_collection=pre_delete_collection,
            embedding=embedding,
            distance_strategy=distance_strategy,
            metadatas=metadatas,
            ids=ids,
            collection_name=collection_name,
            use_jsonb=use_jsonb,
            **kwargs,
        )

    @classmethod
    def connection_string_from_db_params(
        cls,
        driver: str,
        host: str,
        port: int,
        database: str,
        user: str,
        password: str,
    ) -> str:
        """Return connection string from database parameters."""
        return f"postgresql+{driver}://{user}:{password}@{host}:{port}/{database}"

    def _select_relevance_score_fn(self) -> Callable[[float], float]:
        """
        The 'correct' relevance function may differ depending on a few things,
        including:
        - the distance / similarity metric used by the VectorStore
        - the scale of your embeddings (OpenAI's are unit normed. Many
          others are not!)
        - embedding dimensionality
        - etc.
        """
        if self.override_relevance_score_fn is not None:
            return self.override_relevance_score_fn

        # Default strategy is to rely on distance strategy provided
        # in vectorstore constructor
        if self._distance_strategy == DistanceStrategy.COSINE:
            return self._cosine_relevance_score_fn
        elif self._distance_strategy == DistanceStrategy.EUCLIDEAN:
            return self._euclidean_relevance_score_fn
        elif self._distance_strategy == DistanceStrategy.MAX_INNER_PRODUCT:
            return self._max_inner_product_relevance_score_fn
        else:
            raise ValueError(
                "No supported normalization function"
                f" for distance_strategy of {self._distance_strategy}. "
                "Consider providing relevance_score_fn to PGVector constructor."
            )

    def max_marginal_relevance_search_with_score_by_vector(
        self,
        embedding: List[float],
        k: int = 4,
        fetch_k: int = 20,
        lambda_mult: float = 0.5,
        filter: Optional[Dict[str, str]] = None,
        **kwargs: Any,
    ) -> List[Tuple[Document, float]]:
        """Return docs selected using the maximal marginal relevance with score
        to embedding vector.

        Maximal marginal relevance optimizes for similarity to query AND
        diversity among selected documents.

        Args:
            embedding: Embedding to look up documents similar to.
            k (int): Number of Documents to return. Defaults to 4.
            fetch_k (int): Number of Documents to fetch to pass to MMR
                algorithm. Defaults to 20.
            lambda_mult (float): Number between 0 and 1 that determines the
                degree of diversity among the results with 0 corresponding to
                maximum diversity and 1 to minimum diversity.
                Defaults to 0.5.
            filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to
                None.

        Returns:
            List[Tuple[Document, float]]: List of Documents selected by maximal
                marginal relevance to the query and score for each.
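
        Example:
            An illustrative sketch only: it assumes ``store`` is an
            initialized PGVector instance and ``embeddings`` is its
            embedding model.

            .. code-block:: python

                query_vector = embeddings.embed_query("open source databases")
                results = store.max_marginal_relevance_search_with_score_by_vector(
                    embedding=query_vector,
                    k=4,  # documents kept after MMR re-ranking
                    fetch_k=20,  # candidates fetched before re-ranking
                    lambda_mult=0.5,
                )
                for doc, score in results:
                    print(score, doc.page_content)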
""" results = self._query_collection(embedding=embedding, k=fetch_k, filter=filter) embedding_list = [result.EmbeddingStore.embedding for result in results] mmr_selected = maximal_marginal_relevance( np.array(embedding, dtype=np.float32), embedding_list, k=k, lambda_mult=lambda_mult, ) candidates = self._results_to_docs_and_scores(results) return [r for i, r in enumerate(candidates) if i in mmr_selected] def max_marginal_relevance_search( self, query: str, k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, filter: Optional[Dict[str, str]] = None, **kwargs: Any, ) -> List[Document]: """Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: query (str): Text to look up documents similar to. k (int): Number of Documents to return. Defaults to 4. fetch_k (int): Number of Documents to fetch to pass to MMR algorithm. Defaults to 20. lambda_mult (float): Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. Returns: List[Document]: List of Documents selected by maximal marginal relevance. """ embedding = self.embedding_function.embed_query(query) return self.max_marginal_relevance_search_by_vector( embedding, k=k, fetch_k=fetch_k, lambda_mult=lambda_mult, filter=filter, **kwargs, ) def max_marginal_relevance_search_with_score( self, query: str, k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, filter: Optional[dict] = None, **kwargs: Any, ) -> List[Tuple[Document, float]]: """Return docs selected using the maximal marginal relevance with score. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: query (str): Text to look up documents similar to. k (int): Number of Documents to return. Defaults to 4. fetch_k (int): Number of Documents to fetch to pass to MMR algorithm. Defaults to 20. lambda_mult (float): Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. Returns: List[Tuple[Document, float]]: List of Documents selected by maximal marginal relevance to the query and score for each. """ embedding = self.embedding_function.embed_query(query) docs = self.max_marginal_relevance_search_with_score_by_vector( embedding=embedding, k=k, fetch_k=fetch_k, lambda_mult=lambda_mult, filter=filter, **kwargs, ) return docs def max_marginal_relevance_search_by_vector( self, embedding: List[float], k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, filter: Optional[Dict[str, str]] = None, **kwargs: Any, ) -> List[Document]: """Return docs selected using the maximal marginal relevance to embedding vector. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: embedding (str): Text to look up documents similar to. k (int): Number of Documents to return. Defaults to 4. fetch_k (int): Number of Documents to fetch to pass to MMR algorithm. Defaults to 20. lambda_mult (float): Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. 
Returns: List[Document]: List of Documents selected by maximal marginal relevance. """ docs_and_scores = self.max_marginal_relevance_search_with_score_by_vector( embedding, k=k, fetch_k=fetch_k, lambda_mult=lambda_mult, filter=filter, **kwargs, ) return _results_to_docs(docs_and_scores) async def amax_marginal_relevance_search_by_vector( self, embedding: List[float], k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, filter: Optional[Dict[str, str]] = None, **kwargs: Any, ) -> List[Document]: """Return docs selected using the maximal marginal relevance.""" # This is a temporary workaround to make the similarity search # asynchronous. The proper solution is to make the similarity search # asynchronous in the vector store implementations. return await run_in_executor( None, self.max_marginal_relevance_search_by_vector, embedding, k=k, fetch_k=fetch_k, lambda_mult=lambda_mult, filter=filter, **kwargs, )
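

# A minimal end-to-end sketch of the JSONB filter grammar handled by
# `_create_filter_clause` above. Illustrative only: the connection string,
# collection name, and texts are hypothetical, and `FakeEmbeddings` stands in
# for a real embedding model.
if __name__ == "__main__":
    from langchain_community.embeddings import FakeEmbeddings

    store = PGVector.from_texts(
        texts=["pgvector stores embeddings", "postgres is a relational database"],
        embedding=FakeEmbeddings(size=16),
        metadatas=[{"topic": "vectors"}, {"topic": "databases"}],
        collection_name="demo",
        connection_string="postgresql+psycopg2://user:pass@localhost:5432/db",
        use_jsonb=True,
    )
    # Top-level keys are either plain fields (combined with an implicit $and)
    # or the $and / $or operators applied to a list of sub-filters.
    docs = store.similarity_search(
        "embeddings",
        k=2,
        filter={"$or": [{"topic": {"$eq": "vectors"}}, {"topic": {"$eq": "ml"}}]},
    )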
0
lc_public_repos/langchain/libs/community/langchain_community
lc_public_repos/langchain/libs/community/langchain_community/vectorstores/marqo.py
from __future__ import annotations

import json
import uuid
from typing import (
    TYPE_CHECKING,
    Any,
    Callable,
    Dict,
    Iterable,
    List,
    Optional,
    Tuple,
    Type,
    Union,
)

from langchain_core.documents import Document
from langchain_core.embeddings import Embeddings
from langchain_core.vectorstores import VectorStore

if TYPE_CHECKING:
    import marqo


class Marqo(VectorStore):
    """`Marqo` vector store.

    Marqo indexes have their own models associated with them to generate your
    embeddings. This means that you can select from a range of different models
    and also use CLIP models to create multimodal indexes
    with images and text together.

    Marqo also supports more advanced queries with multiple weighted terms, see
    https://docs.marqo.ai/latest/#searching-using-weights-in-queries.
    This class can flexibly take strings or dictionaries for weighted queries
    in its similarity search methods.

    To use, you should have the `marqo` python package installed; you can do
    this with `pip install marqo`.

    Example:
        .. code-block:: python

            import marqo
            from langchain_community.vectorstores import Marqo
            client = marqo.Client(url=os.environ["MARQO_URL"], ...)
            vectorstore = Marqo(client, index_name)

    """

    def __init__(
        self,
        client: marqo.Client,
        index_name: str,
        add_documents_settings: Optional[Dict[str, Any]] = None,
        searchable_attributes: Optional[List[str]] = None,
        page_content_builder: Optional[Callable[[Dict[str, Any]], str]] = None,
    ):
        """Initialize with Marqo client."""
        try:
            import marqo
        except ImportError:
            raise ImportError(
                "Could not import marqo python package. "
                "Please install it with `pip install marqo`."
            )
        if not isinstance(client, marqo.Client):
            raise ValueError(
                f"client should be an instance of marqo.Client, got {type(client)}"
            )
        self._client = client
        self._index_name = index_name
        self._add_documents_settings = (
            {} if add_documents_settings is None else add_documents_settings
        )
        self._searchable_attributes = searchable_attributes
        self.page_content_builder = page_content_builder

        self.tensor_fields = ["text"]

        self._document_batch_size = 1024

    @property
    def embeddings(self) -> Optional[Embeddings]:
        return None

    def add_texts(
        self,
        texts: Iterable[str],
        metadatas: Optional[List[dict]] = None,
        **kwargs: Any,
    ) -> List[str]:
        """Upload texts with metadata (properties) to Marqo.

        You can either have marqo generate ids for each document or you can
        provide your own by including a "_id" field in the metadata objects.

        Args:
            texts (Iterable[str]): an iterator of texts - assumed to preserve an
                order that matches the metadatas.
            metadatas (Optional[List[dict]], optional): a list of metadatas.

        Raises:
            ValueError: if metadatas is provided and the number of metadatas
                differs from the number of texts.

        Returns:
            List[str]: The list of ids that were added.
        """
        settings = self._client.index(self._index_name).get_settings()
        if (
            "index_defaults" in settings
            and settings["index_defaults"]["treat_urls_and_pointers_as_images"]
            or settings.get("treat_urls_and_pointers_as_images")
        ):
            raise ValueError(
                "Marqo.add_texts is disabled for multimodal indexes. To add documents "
                "with a multimodal index use the Python client for Marqo directly."
) documents: List[Dict[str, str]] = [] num_docs = 0 for i, text in enumerate(texts): doc = { "text": text, "metadata": json.dumps(metadatas[i]) if metadatas else json.dumps({}), } documents.append(doc) num_docs += 1 ids = [] for i in range(0, num_docs, self._document_batch_size): response = self._client.index(self._index_name).add_documents( documents[i : i + self._document_batch_size], tensor_fields=self.tensor_fields, **self._add_documents_settings, ) if response["errors"]: err_msg = ( f"Error in upload for documents in index range [{i}," f"{i + self._document_batch_size}], " f"check Marqo logs." ) raise RuntimeError(err_msg) ids += [item["_id"] for item in response["items"]] return ids def similarity_search( self, query: Union[str, Dict[str, float]], k: int = 4, **kwargs: Any, ) -> List[Document]: """Search the marqo index for the most similar documents. Args: query (Union[str, Dict[str, float]]): The query for the search, either as a string or a weighted query. k (int, optional): The number of documents to return. Defaults to 4. Returns: List[Document]: k documents ordered from best to worst match. """ results = self.marqo_similarity_search(query=query, k=k) documents = self._construct_documents_from_results_without_score(results) return documents def similarity_search_with_score( self, query: Union[str, Dict[str, float]], k: int = 4, ) -> List[Tuple[Document, float]]: """Return documents from Marqo that are similar to the query as well as their scores. Args: query (str): The query to search with, either as a string or a weighted query. k (int, optional): The number of documents to return. Defaults to 4. Returns: List[Tuple[Document, float]]: The matching documents and their scores, ordered by descending score. """ results = self.marqo_similarity_search(query=query, k=k) scored_documents = self._construct_documents_from_results_with_score(results) return scored_documents def bulk_similarity_search( self, queries: Iterable[Union[str, Dict[str, float]]], k: int = 4, **kwargs: Any, ) -> List[List[Document]]: """Search the marqo index for the most similar documents in bulk with multiple queries. Args: queries (Iterable[Union[str, Dict[str, float]]]): An iterable of queries to execute in bulk, queries in the list can be strings or dictionaries of weighted queries. k (int, optional): The number of documents to return for each query. Defaults to 4. Returns: List[List[Document]]: A list of results for each query. """ bulk_results = self.marqo_bulk_similarity_search(queries=queries, k=k) bulk_documents: List[List[Document]] = [] for results in bulk_results["result"]: documents = self._construct_documents_from_results_without_score(results) bulk_documents.append(documents) return bulk_documents def bulk_similarity_search_with_score( self, queries: Iterable[Union[str, Dict[str, float]]], k: int = 4, **kwargs: Any, ) -> List[List[Tuple[Document, float]]]: """Return documents from Marqo that are similar to the query as well as their scores using a batch of queries. Args: query (Iterable[Union[str, Dict[str, float]]]): An iterable of queries to execute in bulk, queries in the list can be strings or dictionaries of weighted queries. k (int, optional): The number of documents to return. Defaults to 4. 
Returns: List[Tuple[Document, float]]: A list of lists of the matching documents and their scores for each query """ bulk_results = self.marqo_bulk_similarity_search(queries=queries, k=k) bulk_documents: List[List[Tuple[Document, float]]] = [] for results in bulk_results["result"]: documents = self._construct_documents_from_results_with_score(results) bulk_documents.append(documents) return bulk_documents def _construct_documents_from_results_with_score( self, results: Dict[str, List[Dict[str, str]]] ) -> List[Tuple[Document, Any]]: """Helper to convert Marqo results into documents. Args: results (List[dict]): A marqo results object with the 'hits'. include_scores (bool, optional): Include scores alongside documents. Defaults to False. Returns: Union[List[Document], List[Tuple[Document, float]]]: The documents or document score pairs if `include_scores` is true. """ documents: List[Tuple[Document, Any]] = [] for res in results["hits"]: if self.page_content_builder is None: text = res["text"] else: text = self.page_content_builder(res) metadata = json.loads(res.get("metadata", "{}")) documents.append( (Document(page_content=text, metadata=metadata), res["_score"]) ) return documents def _construct_documents_from_results_without_score( self, results: Dict[str, List[Dict[str, str]]] ) -> List[Document]: """Helper to convert Marqo results into documents. Args: results (List[dict]): A marqo results object with the 'hits'. include_scores (bool, optional): Include scores alongside documents. Defaults to False. Returns: Union[List[Document], List[Tuple[Document, float]]]: The documents or document score pairs if `include_scores` is true. """ documents: List[Document] = [] for res in results["hits"]: if self.page_content_builder is None: text = res["text"] else: text = self.page_content_builder(res) metadata = json.loads(res.get("metadata", "{}")) documents.append(Document(page_content=text, metadata=metadata)) return documents def marqo_similarity_search( self, query: Union[str, Dict[str, float]], k: int = 4, ) -> Dict[str, List[Dict[str, str]]]: """Return documents from Marqo exposing Marqo's output directly Args: query (str): The query to search with. k (int, optional): The number of documents to return. Defaults to 4. Returns: List[Dict[str, Any]]: This hits from marqo. """ results = self._client.index(self._index_name).search( q=query, searchable_attributes=self._searchable_attributes, limit=k ) return results def marqo_bulk_similarity_search( self, queries: Iterable[Union[str, Dict[str, float]]], k: int = 4 ) -> Dict[str, List[Dict[str, List[Dict[str, str]]]]]: """Return documents from Marqo using a bulk search, exposes Marqo's output directly Args: queries (Iterable[Union[str, Dict[str, float]]]): A list of queries. k (int, optional): The number of documents to return for each query. Defaults to 4. Returns: Dict[str, Dict[List[Dict[str, Dict[str, Any]]]]]: A bulk search results object """ bulk_results = { "result": [ self._client.index(self._index_name).search( q=query, searchable_attributes=self._searchable_attributes, limit=k ) for query in queries ] } return bulk_results @classmethod def from_documents( cls: Type[Marqo], documents: List[Document], embedding: Union[Embeddings, None] = None, **kwargs: Any, ) -> Marqo: """Return VectorStore initialized from documents. Note that Marqo does not need embeddings, we retain the parameter to adhere to the Liskov substitution principle. Args: documents (List[Document]): Input documents embedding (Any, optional): Embeddings (not required). 
                Defaults to None.

        Returns:
            VectorStore: A Marqo vectorstore
        """
        texts = [d.page_content for d in documents]
        metadatas = [d.metadata for d in documents]
        return cls.from_texts(texts, metadatas=metadatas, **kwargs)

    @classmethod
    def from_texts(
        cls,
        texts: List[str],
        embedding: Any = None,
        metadatas: Optional[List[dict]] = None,
        index_name: str = "",
        url: str = "http://localhost:8882",
        api_key: str = "",
        add_documents_settings: Optional[Dict[str, Any]] = None,
        searchable_attributes: Optional[List[str]] = None,
        page_content_builder: Optional[Callable[[Dict[str, str]], str]] = None,
        index_settings: Optional[Dict[str, Any]] = None,
        verbose: bool = True,
        **kwargs: Any,
    ) -> Marqo:
        """Return Marqo initialized from texts. Note that Marqo does not need
        embeddings, we retain the parameter to adhere to the Liskov
        substitution principle.

        This is a quick way to get started with marqo - simply provide your texts
        and metadatas and this will create an instance of the data store and
        index the provided data.

        To know the ids of your documents with this approach you will need to
        include them under the key "_id" in your metadatas for each text

        Example:
            .. code-block:: python

                from langchain_community.vectorstores import Marqo

                datastore = Marqo.from_texts(
                    texts=['text'],
                    index_name='my-first-index',
                    url='http://localhost:8882',
                )

        Args:
            texts (List[str]): A list of texts to index into marqo upon creation.
            embedding (Any, optional): Embeddings (not required). Defaults to
                None.
            index_name (str, optional): The name of the index to use, if none is
                provided then one will be created with a UUID. Defaults to "".
            url (str, optional): The URL for Marqo. Defaults to
                "http://localhost:8882".
            api_key (str, optional): The API key for Marqo. Defaults to "".
            metadatas (Optional[List[dict]], optional): A list of metadatas to
                accompany the texts. Defaults to None.
            add_documents_settings (Optional[Dict[str, Any]], optional): Settings
                for adding documents, see
                https://docs.marqo.ai/0.0.16/API-Reference/documents/#query-parameters.
                Defaults to {}.
            index_settings (Optional[Dict[str, Any]], optional): Index settings if
                the index doesn't exist, see
                https://docs.marqo.ai/0.0.16/API-Reference/indexes/#index-defaults-object.
                Defaults to {}.

        Returns:
            Marqo: An instance of the Marqo vector store
        """
        try:
            import marqo
        except ImportError:
            raise ImportError(
                "Could not import marqo python package. "
                "Please install it with `pip install marqo`."
) if not index_name: index_name = str(uuid.uuid4()) client = marqo.Client(url=url, api_key=api_key) try: client.create_index(index_name, settings_dict=index_settings or {}) if verbose: print(f"Created {index_name} successfully.") # noqa: T201 except Exception: if verbose: print(f"Index {index_name} exists.") # noqa: T201 instance: Marqo = cls( client, index_name, searchable_attributes=searchable_attributes, add_documents_settings=add_documents_settings or {}, page_content_builder=page_content_builder, ) instance.add_texts(texts, metadatas) return instance def get_indexes(self) -> List[Dict[str, str]]: """Helper to see your available indexes in marqo, useful if the from_texts method was used without an index name specified Returns: List[Dict[str, str]]: The list of indexes """ return self._client.get_indexes()["results"] def get_number_of_documents(self) -> int: """Helper to see the number of documents in the index Returns: int: The number of documents """ return self._client.index(self._index_name).get_stats()["numberOfDocuments"]
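

# A short usage sketch covering plain, weighted, and bulk queries.
# Illustrative only: it assumes a Marqo server at the default local URL, and
# the index name and texts are hypothetical.
if __name__ == "__main__":
    store = Marqo.from_texts(
        texts=["marqo generates embeddings for you", "weighted queries mix terms"],
        index_name="langchain-demo",
        url="http://localhost:8882",
    )
    # A dict query weights each term; negative weights push results away.
    docs = store.similarity_search({"vector search": 1.0, "images": -0.3}, k=2)
    # Bulk search runs several (string or weighted) queries in one round trip.
    batches = store.bulk_similarity_search(["embeddings", {"weighted": 1.0}], k=2)
    print(len(docs), [len(b) for b in batches])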
0
lc_public_repos/langchain/libs/community/langchain_community
lc_public_repos/langchain/libs/community/langchain_community/vectorstores/deeplake.py
from __future__ import annotations import logging from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Union import numpy as np try: import deeplake from deeplake import VectorStore as DeepLakeVectorStore from deeplake.core.fast_forwarding import version_compare from deeplake.util.exceptions import SampleExtendError _DEEPLAKE_INSTALLED = True except ImportError: _DEEPLAKE_INSTALLED = False from langchain_core.documents import Document from langchain_core.embeddings import Embeddings from langchain_core.vectorstores import VectorStore from langchain_community.vectorstores.utils import maximal_marginal_relevance logger = logging.getLogger(__name__) class DeepLake(VectorStore): """`Activeloop Deep Lake` vector store. We integrated deeplake's similarity search and filtering for fast prototyping. Now, it supports Tensor Query Language (TQL) for production use cases over billion rows. Why Deep Lake? - Not only stores embeddings, but also the original data with version control. - Serverless, doesn't require another service and can be used with major cloud providers (S3, GCS, etc.) - More than just a multi-modal vector store. You can use the dataset to fine-tune your own LLM models. To use, you should have the ``deeplake`` python package installed. Example: .. code-block:: python from langchain_community.vectorstores import DeepLake from langchain_community.embeddings.openai import OpenAIEmbeddings embeddings = OpenAIEmbeddings() vectorstore = DeepLake("langchain_store", embeddings.embed_query) """ _LANGCHAIN_DEFAULT_DEEPLAKE_PATH: str = "./deeplake/" _valid_search_kwargs = ["lambda_mult"] def __init__( self, dataset_path: str = _LANGCHAIN_DEFAULT_DEEPLAKE_PATH, token: Optional[str] = None, embedding: Optional[Embeddings] = None, embedding_function: Optional[Embeddings] = None, read_only: bool = False, ingestion_batch_size: int = 1024, num_workers: int = 0, verbose: bool = True, exec_option: Optional[str] = None, runtime: Optional[Dict] = None, index_params: Optional[Dict[str, Union[int, str]]] = None, **kwargs: Any, ) -> None: """Creates an empty DeepLakeVectorStore or loads an existing one. The DeepLakeVectorStore is located at the specified ``path``. Examples: >>> # Create a vector store with default tensors >>> deeplake_vectorstore = DeepLake( ... path = <path_for_storing_Data>, ... ) >>> >>> # Create a vector store in the Deep Lake Managed Tensor Database >>> data = DeepLake( ... path = "hub://org_id/dataset_name", ... runtime = {"tensor_db": True}, ... ) Args: dataset_path (str): The full path for storing to the Deep Lake Vector Store. It can be: - a Deep Lake cloud path of the form ``hub://org_id/dataset_name``. Requires registration with Deep Lake. - an s3 path of the form ``s3://bucketname/path/to/dataset``. Credentials are required in either the environment or passed to the creds argument. - a local file system path of the form ``./path/to/dataset`` or ``~/path/to/dataset`` or ``path/to/dataset``. - a memory path of the form ``mem://path/to/dataset`` which doesn't save the dataset but keeps it in memory instead. Should be used only for testing as it does not persist. Defaults to _LANGCHAIN_DEFAULT_DEEPLAKE_PATH. token (str, optional): Activeloop token, for fetching credentials to the dataset at path if it is a Deep Lake dataset. Tokens are normally autogenerated. Optional. embedding (Embeddings, optional): Function to convert either documents or query. Optional. embedding_function (Embeddings, optional): Function to convert either documents or query. Optional. 
Deprecated: keeping this parameter for backwards compatibility. read_only (bool): Open dataset in read-only mode. Default is False. ingestion_batch_size (int): During data ingestion, data is divided into batches. Batch size is the size of each batch. Default is 1024. num_workers (int): Number of workers to use during data ingestion. Default is 0. verbose (bool): Print dataset summary after each operation. Default is True. exec_option (str, optional): Default method for search execution. It could be either ``"auto"``, ``"python"``, ``"compute_engine"`` or ``"tensor_db"``. Defaults to ``"auto"``. If None, it's set to "auto". - ``auto``- Selects the best execution method based on the storage location of the Vector Store. It is the default option. - ``python`` - Pure-python implementation that runs on the client and can be used for data stored anywhere. WARNING: using this option with big datasets is discouraged because it can lead to memory issues. - ``compute_engine`` - Performant C++ implementation of the Deep Lake Compute Engine that runs on the client and can be used for any data stored in or connected to Deep Lake. It cannot be used with in-memory or local datasets. - ``tensor_db`` - Performant and fully-hosted Managed Tensor Database that is responsible for storage and query execution. Only available for data stored in the Deep Lake Managed Database. Store datasets in this database by specifying runtime = {"tensor_db": True} during dataset creation. runtime (Dict, optional): Parameters for creating the Vector Store in Deep Lake's Managed Tensor Database. Not applicable when loading an existing Vector Store. To create a Vector Store in the Managed Tensor Database, set `runtime = {"tensor_db": True}`. index_params (Optional[Dict[str, Union[int, str]]], optional): Dictionary containing information about vector index that will be created. Defaults to None, which will utilize ``DEFAULT_VECTORSTORE_INDEX_PARAMS`` from ``deeplake.constants``. The specified key-values override the default ones. - threshold: The threshold for the dataset size above which an index will be created for the embedding tensor. When the threshold value is set to -1, index creation is turned off. Defaults to -1, which turns off the index. - distance_metric: This key specifies the method of calculating the distance between vectors when creating the vector database (VDB) index. It can either be a string that corresponds to a member of the DistanceType enumeration, or the string value itself. - If no value is provided, it defaults to "L2". - "L2" corresponds to DistanceType.L2_NORM. - "COS" corresponds to DistanceType.COSINE_SIMILARITY. - additional_params: Additional parameters for fine-tuning the index. **kwargs: Other optional keyword arguments. Raises: ValueError: If some condition is not met. """ self.ingestion_batch_size = ingestion_batch_size self.num_workers = num_workers self.verbose = verbose if _DEEPLAKE_INSTALLED is False: raise ImportError( "Could not import deeplake python package. " "Please install it with `pip install deeplake[enterprise]`." ) if ( runtime == {"tensor_db": True} and version_compare(deeplake.__version__, "3.6.7") == -1 ): raise ImportError( "To use tensor_db option you need to update deeplake to `3.6.7` or " "higher. " f"Currently installed deeplake version is {deeplake.__version__}. " ) self.dataset_path = dataset_path if embedding_function: logger.warning( "Using embedding function is deprecated and will be removed " "in the future. Please use embedding instead." 
) self.vectorstore = DeepLakeVectorStore( path=self.dataset_path, embedding_function=embedding_function or embedding, read_only=read_only, token=token, exec_option=exec_option, verbose=verbose, runtime=runtime, index_params=index_params, **kwargs, ) self._embedding_function = embedding_function or embedding self._id_tensor_name = "ids" if "ids" in self.vectorstore.tensors() else "id" @property def embeddings(self) -> Optional[Embeddings]: return self._embedding_function def add_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, **kwargs: Any, ) -> List[str]: """Run more texts through the embeddings and add to the vectorstore. Examples: >>> ids = deeplake_vectorstore.add_texts( ... texts = <list_of_texts>, ... metadatas = <list_of_metadata_jsons>, ... ids = <list_of_ids>, ... ) Args: texts (Iterable[str]): Texts to add to the vectorstore. metadatas (Optional[List[dict]], optional): Optional list of metadatas. ids (Optional[List[str]], optional): Optional list of IDs. embedding_function (Optional[Embeddings], optional): Embedding function to use to convert the text into embeddings. **kwargs (Any): Any additional keyword arguments passed is not supported by this method. Returns: List[str]: List of IDs of the added texts. """ self._validate_kwargs(kwargs, "add_texts") kwargs = {} if ids: if self._id_tensor_name == "ids": # for backwards compatibility kwargs["ids"] = ids else: kwargs["id"] = ids if metadatas is None: metadatas = [{}] * len(list(texts)) if not isinstance(texts, list): texts = list(texts) if texts is None: raise ValueError("`texts` parameter shouldn't be None.") elif len(texts) == 0: raise ValueError("`texts` parameter shouldn't be empty.") try: return self.vectorstore.add( text=texts, metadata=metadatas, embedding_data=texts, embedding_tensor="embedding", embedding_function=self._embedding_function.embed_documents, # type: ignore return_ids=True, **kwargs, ) except SampleExtendError as e: if "Failed to append a sample to the tensor 'metadata'" in str(e): msg = ( "**Hint: You might be using invalid type of argument in " "document loader (e.g. 'pathlib.PosixPath' instead of 'str')" ) raise ValueError(e.args[0] + "\n\n" + msg) else: raise e def _search_tql( self, tql: Optional[str], exec_option: Optional[str] = None, **kwargs: Any, ) -> List[Document]: """Function for performing tql_search. Args: tql (str): TQL Query string for direct evaluation. Available only for `compute_engine` and `tensor_db`. exec_option (str, optional): Supports 3 ways to search. Could be "python", "compute_engine" or "tensor_db". Default is "python". - ``python`` - Pure-python implementation for the client. WARNING: not recommended for big datasets due to potential memory issues. - ``compute_engine`` - C++ implementation of Deep Lake Compute Engine for the client. Not for in-memory or local datasets. - ``tensor_db`` - Hosted Managed Tensor Database for storage and query execution. Only for data in Deep Lake Managed Database. Use runtime = {"db_engine": True} during dataset creation. return_score (bool): Return score with document. Default is False. Returns: Tuple[List[Document], List[Tuple[Document, float]]] - A tuple of two lists. The first list contains Documents, and the second list contains tuples of Document and float score. Raises: ValueError: If return_score is True but some condition is not met. 
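
        Example:
            An illustrative sketch only: the id value is hypothetical, and
            TQL evaluation requires ``compute_engine`` or ``tensor_db``.

            .. code-block:: python

                docs = db._search_tql(
                    tql="SELECT * WHERE id == '<document_id>'",
                    exec_option="compute_engine",
                )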
""" result = self.vectorstore.search( query=tql, exec_option=exec_option, ) metadatas = result["metadata"] texts = result["text"] docs = [ Document( page_content=text, metadata=metadata, ) for text, metadata in zip(texts, metadatas) ] if kwargs: unsupported_argument = next(iter(kwargs)) if kwargs[unsupported_argument] is not False: raise ValueError( f"specifying {unsupported_argument} is " "not supported with tql search." ) return docs def _search( self, query: Optional[str] = None, embedding: Optional[Union[List[float], np.ndarray]] = None, embedding_function: Optional[Callable] = None, k: int = 4, distance_metric: Optional[str] = None, use_maximal_marginal_relevance: bool = False, fetch_k: Optional[int] = 20, filter: Optional[Union[Dict, Callable]] = None, return_score: bool = False, exec_option: Optional[str] = None, deep_memory: bool = False, **kwargs: Any, ) -> Any[List[Document], List[Tuple[Document, float]]]: """ Return docs similar to query. Args: query (str, optional): Text to look up similar docs. embedding (Union[List[float], np.ndarray], optional): Query's embedding. embedding_function (Callable, optional): Function to convert `query` into embedding. k (int): Number of Documents to return. distance_metric (Optional[str], optional): `L2` for Euclidean, `L1` for Nuclear, `max` for L-infinity distance, `cos` for cosine similarity, 'dot' for dot product. filter (Union[Dict, Callable], optional): Additional filter prior to the embedding search. - ``Dict`` - Key-value search on tensors of htype json, on an AND basis (a sample must satisfy all key-value filters to be True) Dict = {"tensor_name_1": {"key": value}, "tensor_name_2": {"key": value}} - ``Function`` - Any function compatible with `deeplake.filter`. use_maximal_marginal_relevance (bool): Use maximal marginal relevance. fetch_k (int): Number of Documents for MMR algorithm. return_score (bool): Return the score. exec_option (str, optional): Supports 3 ways to perform searching. Could be "python", "compute_engine" or "tensor_db". - ``python`` - Pure-python implementation for the client. WARNING: not recommended for big datasets. - ``compute_engine`` - C++ implementation of Deep Lake Compute Engine for the client. Not for in-memory or local datasets. - ``tensor_db`` - Hosted Managed Tensor Database for storage and query execution. Only for data in Deep Lake Managed Database. Use runtime = {"db_engine": True} during dataset creation. deep_memory (bool): Whether to use the Deep Memory model for improving search results. Defaults to False if deep_memory is not specified in the Vector Store initialization. If True, the distance metric is set to "deepmemory_distance", which represents the metric with which the model was trained. The search is performed using the Deep Memory model. If False, the distance metric is set to "COS" or whatever distance metric user specifies. kwargs: Additional keyword arguments. Returns: List of Documents by the specified distance metric, if return_score True, return a tuple of (Document, score) Raises: ValueError: if both `embedding` and `embedding_function` are not specified. """ if kwargs.get("tql_query"): logger.warning("`tql_query` is deprecated. 
Please use `tql` instead.") kwargs["tql"] = kwargs.pop("tql_query") if kwargs.get("tql"): return self._search_tql( tql=kwargs["tql"], exec_option=exec_option, return_score=return_score, embedding=embedding, embedding_function=embedding_function, distance_metric=distance_metric, use_maximal_marginal_relevance=use_maximal_marginal_relevance, filter=filter, ) self._validate_kwargs(kwargs, "search") if embedding_function: if isinstance(embedding_function, Embeddings): _embedding_function = embedding_function.embed_query else: _embedding_function = embedding_function elif self._embedding_function: _embedding_function = self._embedding_function.embed_query else: _embedding_function = None if embedding is None: if _embedding_function is None: raise ValueError( "Either `embedding` or `embedding_function` needs to be" " specified." ) embedding = _embedding_function(query) if query else None if isinstance(embedding, list): embedding = np.array(embedding, dtype=np.float32) if len(embedding.shape) > 1: embedding = embedding[0] result = self.vectorstore.search( embedding=embedding, k=fetch_k if use_maximal_marginal_relevance else k, distance_metric=distance_metric, filter=filter, exec_option=exec_option, return_tensors=["embedding", "metadata", "text", self._id_tensor_name], deep_memory=deep_memory, ) scores = result["score"] embeddings = result["embedding"] metadatas = result["metadata"] texts = result["text"] if use_maximal_marginal_relevance: lambda_mult = kwargs.get("lambda_mult", 0.5) indices = maximal_marginal_relevance( # type: ignore embedding, # type: ignore embeddings, k=min(k, len(texts)), lambda_mult=lambda_mult, ) scores = [scores[i] for i in indices] texts = [texts[i] for i in indices] metadatas = [metadatas[i] for i in indices] docs = [ Document( page_content=text, metadata=metadata, ) for text, metadata in zip(texts, metadatas) ] if return_score: if not isinstance(scores, list): scores = [scores] return [(doc, score) for doc, score in zip(docs, scores)] return docs def similarity_search( self, query: str, k: int = 4, **kwargs: Any, ) -> List[Document]: """ Return docs most similar to query. Examples: >>> # Search using an embedding >>> data = vector_store.similarity_search( ... query=<your_query>, ... k=<num_items>, ... exec_option=<preferred_exec_option>, ... ) >>> # Run tql search: >>> data = vector_store.similarity_search( ... query=None, ... tql="SELECT * WHERE id == <id>", ... exec_option="compute_engine", ... ) Args: k (int): Number of Documents to return. Defaults to 4. query (str): Text to look up similar documents. kwargs: Additional keyword arguments include: embedding (Callable): Embedding function to use. Defaults to None. distance_metric (str): 'L2' for Euclidean, 'L1' for Nuclear, 'max' for L-infinity, 'cos' for cosine, 'dot' for dot product. Defaults to 'L2'. filter (Union[Dict, Callable], optional): Additional filter before embedding search. - Dict: Key-value search on tensors of htype json, (sample must satisfy all key-value filters) Dict = {"tensor_1": {"key": value}, "tensor_2": {"key": value}} - Function: Compatible with `deeplake.filter`. Defaults to None. exec_option (str): Supports 3 ways to perform searching. 'python', 'compute_engine', or 'tensor_db'. Defaults to 'python'. - 'python': Pure-python implementation for the client. WARNING: not recommended for big datasets. - 'compute_engine': C++ implementation of the Compute Engine for the client. Not for in-memory or local datasets. - 'tensor_db': Managed Tensor Database for storage and query. 
Only for data in Deep Lake Managed Database. Use `runtime = {"db_engine": True}` during dataset creation. deep_memory (bool): Whether to use the Deep Memory model for improving search results. Defaults to False if deep_memory is not specified in the Vector Store initialization. If True, the distance metric is set to "deepmemory_distance", which represents the metric with which the model was trained. The search is performed using the Deep Memory model. If False, the distance metric is set to "COS" or whatever distance metric user specifies. Returns: List[Document]: List of Documents most similar to the query vector. """ return self._search( query=query, k=k, use_maximal_marginal_relevance=False, return_score=False, **kwargs, ) def similarity_search_by_vector( self, embedding: Union[List[float], np.ndarray], k: int = 4, **kwargs: Any, ) -> List[Document]: """ Return docs most similar to embedding vector. Examples: >>> # Search using an embedding >>> data = vector_store.similarity_search_by_vector( ... embedding=<your_embedding>, ... k=<num_items_to_return>, ... exec_option=<preferred_exec_option>, ... ) Args: embedding (Union[List[float], np.ndarray]): Embedding to find similar docs. k (int): Number of Documents to return. Defaults to 4. kwargs: Additional keyword arguments including: filter (Union[Dict, Callable], optional): Additional filter before embedding search. - ``Dict`` - Key-value search on tensors of htype json. True if all key-value filters are satisfied. Dict = {"tensor_name_1": {"key": value}, "tensor_name_2": {"key": value}} - ``Function`` - Any function compatible with `deeplake.filter`. Defaults to None. exec_option (str): Options for search execution include "python", "compute_engine", or "tensor_db". Defaults to "python". - "python" - Pure-python implementation running on the client. Can be used for data stored anywhere. WARNING: using this option with big datasets is discouraged due to potential memory issues. - "compute_engine" - Performant C++ implementation of the Deep Lake Compute Engine. Runs on the client and can be used for any data stored in or connected to Deep Lake. It cannot be used with in-memory or local datasets. - "tensor_db" - Performant, fully-hosted Managed Tensor Database. Responsible for storage and query execution. Only available for data stored in the Deep Lake Managed Database. To store datasets in this database, specify `runtime = {"db_engine": True}` during dataset creation. distance_metric (str): `L2` for Euclidean, `L1` for Nuclear, `max` for L-infinity distance, `cos` for cosine similarity, 'dot' for dot product. Defaults to `L2`. deep_memory (bool): Whether to use the Deep Memory model for improving search results. Defaults to False if deep_memory is not specified in the Vector Store initialization. If True, the distance metric is set to "deepmemory_distance", which represents the metric with which the model was trained. The search is performed using the Deep Memory model. If False, the distance metric is set to "COS" or whatever distance metric user specifies. Returns: List[Document]: List of Documents most similar to the query vector. """ return self._search( embedding=embedding, k=k, use_maximal_marginal_relevance=False, return_score=False, **kwargs, ) def similarity_search_with_score( self, query: str, k: int = 4, **kwargs: Any, ) -> List[Tuple[Document, float]]: """ Run similarity search with Deep Lake with distance returned. Examples: >>> data = vector_store.similarity_search_with_score( ... query=<your_query>, ... 
embedding=<your_embedding_function> ... k=<number_of_items_to_return>, ... exec_option=<preferred_exec_option>, ... ) Args: query (str): Query text to search for. k (int): Number of results to return. Defaults to 4. kwargs: Additional keyword arguments. Some of these arguments are: distance_metric: `L2` for Euclidean, `L1` for Nuclear, `max` L-infinity distance, `cos` for cosine similarity, 'dot' for dot product. Defaults to `L2`. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. embedding_function (Callable): Embedding function to use. Defaults to None. exec_option (str): DeepLakeVectorStore supports 3 ways to perform searching. It could be either "python", "compute_engine" or "tensor_db". Defaults to "python". - "python" - Pure-python implementation running on the client. Can be used for data stored anywhere. WARNING: using this option with big datasets is discouraged due to potential memory issues. - "compute_engine" - Performant C++ implementation of the Deep Lake Compute Engine. Runs on the client and can be used for any data stored in or connected to Deep Lake. It cannot be used with in-memory or local datasets. - "tensor_db" - Performant, fully-hosted Managed Tensor Database. Responsible for storage and query execution. Only available for data stored in the Deep Lake Managed Database. To store datasets in this database, specify `runtime = {"db_engine": True}` during dataset creation. deep_memory (bool): Whether to use the Deep Memory model for improving search results. Defaults to False if deep_memory is not specified in the Vector Store initialization. If True, the distance metric is set to "deepmemory_distance", which represents the metric with which the model was trained. The search is performed using the Deep Memory model. If False, the distance metric is set to "COS" or whatever distance metric user specifies. Returns: List[Tuple[Document, float]]: List of documents most similar to the query text with distance in float.""" return self._search( query=query, k=k, return_score=True, **kwargs, ) def max_marginal_relevance_search_by_vector( self, embedding: List[float], k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, exec_option: Optional[str] = None, **kwargs: Any, ) -> List[Document]: """ Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected docs. Examples: >>> data = vector_store.max_marginal_relevance_search_by_vector( ... embedding=<your_embedding>, ... fetch_k=<elements_to_fetch_before_mmr_search>, ... k=<number_of_items_to_return>, ... exec_option=<preferred_exec_option>, ... ) Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. fetch_k: Number of Documents to fetch for MMR algorithm. lambda_mult: Number between 0 and 1 determining the degree of diversity. 0 corresponds to max diversity and 1 to min diversity. Defaults to 0.5. exec_option (str): DeepLakeVectorStore supports 3 ways for searching. Could be "python", "compute_engine" or "tensor_db". Defaults to "python". - "python" - Pure-python implementation running on the client. Can be used for data stored anywhere. WARNING: using this option with big datasets is discouraged due to potential memory issues. - "compute_engine" - Performant C++ implementation of the Deep Lake Compute Engine. Runs on the client and can be used for any data stored in or connected to Deep Lake. It cannot be used with in-memory or local datasets. 
- "tensor_db" - Performant, fully-hosted Managed Tensor Database. Responsible for storage and query execution. Only available for data stored in the Deep Lake Managed Database. To store datasets in this database, specify `runtime = {"db_engine": True}` during dataset creation. deep_memory (bool): Whether to use the Deep Memory model for improving search results. Defaults to False if deep_memory is not specified in the Vector Store initialization. If True, the distance metric is set to "deepmemory_distance", which represents the metric with which the model was trained. The search is performed using the Deep Memory model. If False, the distance metric is set to "COS" or whatever distance metric user specifies. kwargs: Additional keyword arguments. Returns: List[Documents] - A list of documents. """ return self._search( embedding=embedding, k=k, fetch_k=fetch_k, use_maximal_marginal_relevance=True, lambda_mult=lambda_mult, exec_option=exec_option, **kwargs, ) def max_marginal_relevance_search( self, query: str, k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, exec_option: Optional[str] = None, **kwargs: Any, ) -> List[Document]: """Return docs selected using maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Examples: >>> # Search using an embedding >>> data = vector_store.max_marginal_relevance_search( ... query = <query_to_search>, ... embedding_function = <embedding_function_for_query>, ... k = <number_of_items_to_return>, ... exec_option = <preferred_exec_option>, ... ) Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. fetch_k: Number of Documents for MMR algorithm. lambda_mult: Value between 0 and 1. 0 corresponds to maximum diversity and 1 to minimum. Defaults to 0.5. exec_option (str): Supports 3 ways to perform searching. - "python" - Pure-python implementation running on the client. Can be used for data stored anywhere. WARNING: using this option with big datasets is discouraged due to potential memory issues. - "compute_engine" - Performant C++ implementation of the Deep Lake Compute Engine. Runs on the client and can be used for any data stored in or connected to Deep Lake. It cannot be used with in-memory or local datasets. - "tensor_db" - Performant, fully-hosted Managed Tensor Database. Responsible for storage and query execution. Only available for data stored in the Deep Lake Managed Database. To store datasets in this database, specify `runtime = {"db_engine": True}` during dataset creation. deep_memory (bool): Whether to use the Deep Memory model for improving search results. Defaults to False if deep_memory is not specified in the Vector Store initialization. If True, the distance metric is set to "deepmemory_distance", which represents the metric with which the model was trained. The search is performed using the Deep Memory model. If False, the distance metric is set to "COS" or whatever distance metric user specifies. kwargs: Additional keyword arguments Returns: List of Documents selected by maximal marginal relevance. Raises: ValueError: when MRR search is on but embedding function is not specified. """ embedding_function = kwargs.get("embedding") or self._embedding_function if embedding_function is None: raise ValueError( "For MMR search, you must specify an embedding function on" " `creation` or during add call." 
) return self._search( query=query, k=k, fetch_k=fetch_k, use_maximal_marginal_relevance=True, lambda_mult=lambda_mult, exec_option=exec_option, embedding_function=embedding_function, # type: ignore **kwargs, ) @classmethod def from_texts( cls, texts: List[str], embedding: Optional[Embeddings] = None, metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, dataset_path: str = _LANGCHAIN_DEFAULT_DEEPLAKE_PATH, **kwargs: Any, ) -> DeepLake: """Create a Deep Lake dataset from a raw documents. If a dataset_path is specified, the dataset will be persisted in that location, otherwise by default at `./deeplake` Examples: >>> # Search using an embedding >>> vector_store = DeepLake.from_texts( ... texts = <the_texts_that_you_want_to_embed>, ... embedding_function = <embedding_function_for_query>, ... k = <number_of_items_to_return>, ... exec_option = <preferred_exec_option>, ... ) Args: dataset_path (str): - The full path to the dataset. Can be: - Deep Lake cloud path of the form ``hub://username/dataset_name``. To write to Deep Lake cloud datasets, ensure that you are logged in to Deep Lake (use 'activeloop login' from command line) - AWS S3 path of the form ``s3://bucketname/path/to/dataset``. Credentials are required in either the environment - Google Cloud Storage path of the form ``gcs://bucketname/path/to/dataset`` Credentials are required in either the environment - Local file system path of the form ``./path/to/dataset`` or ``~/path/to/dataset`` or ``path/to/dataset``. - In-memory path of the form ``mem://path/to/dataset`` which doesn't save the dataset, but keeps it in memory instead. Should be used only for testing as it does not persist. texts (List[Document]): List of documents to add. embedding (Optional[Embeddings]): Embedding function. Defaults to None. Note, in other places, it is called embedding_function. metadatas (Optional[List[dict]]): List of metadatas. Defaults to None. ids (Optional[List[str]]): List of document IDs. Defaults to None. kwargs: Additional keyword arguments. Returns: DeepLake: Deep Lake dataset. """ deeplake_dataset = cls(dataset_path=dataset_path, embedding=embedding, **kwargs) deeplake_dataset.add_texts( texts=texts, metadatas=metadatas, ids=ids, ) return deeplake_dataset def delete(self, ids: Optional[List[str]] = None, **kwargs: Any) -> bool: """Delete the entities in the dataset. Args: ids (Optional[List[str]], optional): The document_ids to delete. Defaults to None. **kwargs: Other keyword arguments that subclasses might use. - filter (Optional[Dict[str, str]], optional): The filter to delete by. - delete_all (Optional[bool], optional): Whether to drop the dataset. Returns: bool: Whether the delete operation was successful. """ filter = kwargs.get("filter") delete_all = kwargs.get("delete_all") self.vectorstore.delete(ids=ids, filter=filter, delete_all=delete_all) return True @classmethod def force_delete_by_path(cls, path: str) -> None: """Force delete dataset by path. Args: path (str): path of the dataset to delete. Raises: ValueError: if deeplake is not installed. """ try: import deeplake except ImportError: raise ImportError( "Could not import deeplake python package. " "Please install it with `pip install deeplake`." ) deeplake.delete(path, large_ok=True, force=True) def delete_dataset(self) -> None: """Delete the collection.""" self.delete(delete_all=True) def ds(self) -> Any: logger.warning( "this method is deprecated and will be removed, " "better to use `db.vectorstore.dataset` instead." 
        )
        return self.vectorstore.dataset

    @classmethod
    def _validate_kwargs(cls, kwargs, method_name):  # type: ignore[no-untyped-def]
        if kwargs:
            valid_items = cls._get_valid_args(method_name)
            unsupported_items = cls._get_unsupported_items(kwargs, valid_items)

            if unsupported_items:
                raise TypeError(
                    f"`{unsupported_items}` are not valid "
                    f"arguments to the {method_name} method"
                )

    @classmethod
    def _get_valid_args(cls, method_name):  # type: ignore[no-untyped-def]
        if method_name == "search":
            return cls._valid_search_kwargs
        else:
            return []

    @staticmethod
    def _get_unsupported_items(kwargs, valid_items):  # type: ignore[no-untyped-def]
        kwargs = {k: v for k, v in kwargs.items() if k not in valid_items}
        unsupported_items = None
        if kwargs:
            unsupported_items = "`, `".join(set(kwargs.keys()))

        return unsupported_items
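

# A minimal usage sketch for the filter and search paths above. Illustrative
# only: the dataset path and texts are hypothetical, and `FakeEmbeddings`
# stands in for a real embedding model.
if __name__ == "__main__":
    from langchain_community.embeddings import FakeEmbeddings

    db = DeepLake.from_texts(
        texts=["deep lake stores tensors", "vector search with filters"],
        embedding=FakeEmbeddings(size=16),
        metadatas=[{"kind": "intro"}, {"kind": "search"}],
        dataset_path="./deeplake_demo/",
    )
    # Dict filters are key-value matches on the `metadata` tensor (AND basis).
    docs = db.similarity_search(
        "tensors", k=1, filter={"metadata": {"kind": "intro"}}
    )
    db.delete_dataset()  # drop the demo dataset again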
0
lc_public_repos/langchain/libs/community/langchain_community
lc_public_repos/langchain/libs/community/langchain_community/vectorstores/apache_doris.py
from __future__ import annotations import json import logging from hashlib import sha1 from threading import Thread from typing import Any, Dict, Iterable, List, Optional, Tuple from langchain_core.documents import Document from langchain_core.embeddings import Embeddings from langchain_core.vectorstores import VectorStore from pydantic_settings import BaseSettings, SettingsConfigDict logger = logging.getLogger() DEBUG = False class ApacheDorisSettings(BaseSettings): """Apache Doris client configuration. Attributes: apache_doris_host (str) : An URL to connect to frontend. Defaults to 'localhost'. apache_doris_port (int) : URL port to connect with HTTP. Defaults to 9030. username (str) : Username to login. Defaults to 'root'. password (str) : Password to login. Defaults to None. database (str) : Database name to find the table. Defaults to 'default'. table (str) : Table name to operate on. Defaults to 'langchain'. column_map (Dict) : Column type map to project column name onto langchain semantics. Must have keys: `text`, `id`, `vector`, must be same size to number of columns. For example: .. code-block:: python { 'id': 'text_id', 'embedding': 'text_embedding', 'document': 'text_plain', 'metadata': 'metadata_dictionary_in_json', } Defaults to identity map. """ host: str = "localhost" port: int = 9030 username: str = "root" password: str = "" column_map: Dict[str, str] = { "id": "id", "document": "document", "embedding": "embedding", "metadata": "metadata", } database: str = "default" table: str = "langchain" def __getitem__(self, item: str) -> Any: return getattr(self, item) model_config = SettingsConfigDict( env_file=".env", env_file_encoding="utf-8", env_prefix="apache_doris_", extra="ignore", ) class ApacheDoris(VectorStore): """`Apache Doris` vector store. You need a `pymysql` python package, and a valid account to connect to Apache Doris. For more information, please visit [Apache Doris official site](https://doris.apache.org/) [Apache Doris github](https://github.com/apache/doris) """ def __init__( self, embedding: Embeddings, *, config: Optional[ApacheDorisSettings] = None, **kwargs: Any, ) -> None: """Constructor for Apache Doris. Args: embedding (Embeddings): Text embedding model. config (ApacheDorisSettings): Apache Doris client configuration information. """ try: import pymysql # type: ignore[import] except ImportError: raise ImportError( "Could not import pymysql python package. " "Please install it with `pip install pymysql`." 
) try: from tqdm import tqdm self.pgbar = tqdm except ImportError: # Just in case if tqdm is not installed self.pgbar = lambda x, **kwargs: x super().__init__() if config is not None: self.config = config else: self.config = ApacheDorisSettings() assert self.config assert self.config.host and self.config.port assert self.config.column_map and self.config.database and self.config.table for k in ["id", "embedding", "document", "metadata"]: assert k in self.config.column_map # initialize the schema dim = len(embedding.embed_query("test")) self.schema = f"""\ CREATE TABLE IF NOT EXISTS {self.config.database}.{self.config.table}( {self.config.column_map['id']} varchar(50), {self.config.column_map['document']} string, {self.config.column_map['embedding']} array<float>, {self.config.column_map['metadata']} string ) ENGINE = OLAP UNIQUE KEY(id) DISTRIBUTED BY HASH(id) \ PROPERTIES ("replication_allocation" = "tag.location.default: 1")\ """ self.dim = dim self.BS = "\\" self.must_escape = ("\\", "'") self._embedding = embedding self.dist_order = "DESC" _debug_output(self.config) # Create a connection to Apache Doris self.connection = pymysql.connect( host=self.config.host, port=self.config.port, user=self.config.username, password=self.config.password, database=self.config.database, **kwargs, ) _debug_output(self.schema) _get_named_result(self.connection, self.schema) def escape_str(self, value: str) -> str: return "".join(f"{self.BS}{c}" if c in self.must_escape else c for c in value) @property def embeddings(self) -> Embeddings: return self._embedding def _build_insert_sql(self, transac: Iterable, column_names: Iterable[str]) -> str: ks = ",".join(column_names) embed_tuple_index = tuple(column_names).index( self.config.column_map["embedding"] ) _data = [] for n in transac: n = ",".join( [ ( f"'{self.escape_str(str(_n))}'" if idx != embed_tuple_index else f"{str(_n)}" ) for (idx, _n) in enumerate(n) ] ) _data.append(f"({n})") i_str = f""" INSERT INTO {self.config.database}.{self.config.table}({ks}) VALUES {','.join(_data)} """ return i_str def _insert(self, transac: Iterable, column_names: Iterable[str]) -> None: _insert_query = self._build_insert_sql(transac, column_names) _debug_output(_insert_query) _get_named_result(self.connection, _insert_query) def add_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, batch_size: int = 32, ids: Optional[Iterable[str]] = None, **kwargs: Any, ) -> List[str]: """Insert more texts through the embeddings and add to the VectorStore. Args: texts: Iterable of strings to add to the VectorStore. ids: Optional list of ids to associate with the texts. batch_size: Batch size of insertion metadata: Optional column data to be inserted Returns: List of ids from adding the texts into the VectorStore. 
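
        Example:
            A minimal sketch, assuming ``store`` is an already-initialized
            ApacheDoris instance with a reachable frontend; the texts and
            metadata values below are placeholders:

            .. code-block:: python

                ids = store.add_texts(
                    texts=["doc one", "doc two"],
                    metadatas=[{"source": "a"}, {"source": "b"}],
                    batch_size=32,
                )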
""" # Embed and create the documents ids = ids or [sha1(t.encode("utf-8")).hexdigest() for t in texts] colmap_ = self.config.column_map transac = [] column_names = { colmap_["id"]: ids, colmap_["document"]: texts, colmap_["embedding"]: self._embedding.embed_documents(list(texts)), } metadatas = metadatas or [{} for _ in texts] column_names[colmap_["metadata"]] = map(json.dumps, metadatas) assert len(set(colmap_) - set(column_names)) >= 0 keys, values = zip(*column_names.items()) try: t = None for v in self.pgbar( zip(*values), desc="Inserting data...", total=len(metadatas) ): assert ( len(v[keys.index(self.config.column_map["embedding"])]) == self.dim ) transac.append(v) if len(transac) == batch_size: if t: t.join() t = Thread(target=self._insert, args=[transac, keys]) t.start() transac = [] if len(transac) > 0: if t: t.join() self._insert(transac, keys) return [i for i in ids] except Exception as e: logger.error(f"\033[91m\033[1m{type(e)}\033[0m \033[95m{str(e)}\033[0m") return [] @classmethod def from_texts( cls, texts: List[str], embedding: Embeddings, metadatas: Optional[List[Dict[Any, Any]]] = None, config: Optional[ApacheDorisSettings] = None, text_ids: Optional[Iterable[str]] = None, batch_size: int = 32, **kwargs: Any, ) -> ApacheDoris: """Create Apache Doris wrapper with existing texts Args: embedding_function (Embeddings): Function to extract text embedding texts (Iterable[str]): List or tuple of strings to be added config (ApacheDorisSettings, Optional): Apache Doris configuration text_ids (Optional[Iterable], optional): IDs for the texts. Defaults to None. batch_size (int, optional): BatchSize when transmitting data to Apache Doris. Defaults to 32. metadata (List[dict], optional): metadata to texts. Defaults to None. Returns: Apache Doris Index """ ctx = cls(embedding, config=config, **kwargs) ctx.add_texts(texts, ids=text_ids, batch_size=batch_size, metadatas=metadatas) return ctx def __repr__(self) -> str: """Text representation for Apache Doris Vector Store, prints frontends, username and schemas. 
Easy to use with `str(ApacheDoris())` Returns: repr: string to show connection info and data schema """ _repr = f"\033[92m\033[1m{self.config.database}.{self.config.table} @ " _repr += f"{self.config.host}:{self.config.port}\033[0m\n\n" _repr += f"\033[1musername: {self.config.username}\033[0m\n\nTable Schema:\n" width = 25 fields = 3 _repr += "-" * (width * fields + 1) + "\n" columns = ["name", "type", "key"] _repr += f"|\033[94m{columns[0]:24s}\033[0m|\033[96m{columns[1]:24s}" _repr += f"\033[0m|\033[96m{columns[2]:24s}\033[0m|\n" _repr += "-" * (width * fields + 1) + "\n" q_str = f"DESC {self.config.database}.{self.config.table}" _debug_output(q_str) rs = _get_named_result(self.connection, q_str) for r in rs: _repr += f"|\033[94m{r['Field']:24s}\033[0m|\033[96m{r['Type']:24s}" _repr += f"\033[0m|\033[96m{r['Key']:24s}\033[0m|\n" _repr += "-" * (width * fields + 1) + "\n" return _repr def _build_query_sql( self, q_emb: List[float], topk: int, where_str: Optional[str] = None ) -> str: q_emb_str = ",".join(map(str, q_emb)) if where_str: where_str = f"WHERE {where_str}" else: where_str = "" q_str = f""" SELECT {self.config.column_map['document']}, {self.config.column_map['metadata']}, cosine_distance(array<float>[{q_emb_str}], {self.config.column_map['embedding']}) as dist FROM {self.config.database}.{self.config.table} {where_str} ORDER BY dist {self.dist_order} LIMIT {topk} """ _debug_output(q_str) return q_str def similarity_search( self, query: str, k: int = 4, where_str: Optional[str] = None, **kwargs: Any ) -> List[Document]: """Perform a similarity search with Apache Doris Args: query (str): query string k (int, optional): Top K neighbors to retrieve. Defaults to 4. where_str (Optional[str], optional): where condition string. Defaults to None. NOTE: Please do not let end-user to fill this and always be aware of SQL injection. When dealing with metadatas, remember to use `{self.metadata_column}.attribute` instead of `attribute` alone. The default name for it is `metadata`. Returns: List[Document]: List of Documents """ return self.similarity_search_by_vector( self._embedding.embed_query(query), k, where_str, **kwargs ) def similarity_search_by_vector( self, embedding: List[float], k: int = 4, where_str: Optional[str] = None, **kwargs: Any, ) -> List[Document]: """Perform a similarity search with Apache Doris by vectors Args: query (str): query string k (int, optional): Top K neighbors to retrieve. Defaults to 4. where_str (Optional[str], optional): where condition string. Defaults to None. NOTE: Please do not let end-user to fill this and always be aware of SQL injection. When dealing with metadatas, remember to use `{self.metadata_column}.attribute` instead of `attribute` alone. The default name for it is `metadata`. Returns: List[Document]: List of (Document, similarity) """ q_str = self._build_query_sql(embedding, k, where_str) try: return [ Document( page_content=r[self.config.column_map["document"]], metadata=json.loads(r[self.config.column_map["metadata"]]), ) for r in _get_named_result(self.connection, q_str) ] except Exception as e: logger.error(f"\033[91m\033[1m{type(e)}\033[0m \033[95m{str(e)}\033[0m") return [] def similarity_search_with_relevance_scores( self, query: str, k: int = 4, where_str: Optional[str] = None, **kwargs: Any ) -> List[Tuple[Document, float]]: """Perform a similarity search with Apache Doris Args: query (str): query string k (int, optional): Top K neighbors to retrieve. Defaults to 4. where_str (Optional[str], optional): where condition string. 
Defaults to None. NOTE: Please do not let end-user to fill this and always be aware of SQL injection. When dealing with metadatas, remember to use `{self.metadata_column}.attribute` instead of `attribute` alone. The default name for it is `metadata`. Returns: List[Document]: List of documents """ q_str = self._build_query_sql(self._embedding.embed_query(query), k, where_str) try: return [ ( Document( page_content=r[self.config.column_map["document"]], metadata=json.loads(r[self.config.column_map["metadata"]]), ), r["dist"], ) for r in _get_named_result(self.connection, q_str) ] except Exception as e: logger.error(f"\033[91m\033[1m{type(e)}\033[0m \033[95m{str(e)}\033[0m") return [] def drop(self) -> None: """ Helper function: Drop data """ _get_named_result( self.connection, f"DROP TABLE IF EXISTS {self.config.database}.{self.config.table}", ) @property def metadata_column(self) -> str: return self.config.column_map["metadata"] def _has_mul_sub_str(s: str, *args: Any) -> bool: """Check if a string has multiple substrings. Args: s: The string to check *args: The substrings to check for in the string Returns: bool: True if all substrings are present in the string, False otherwise """ for a in args: if a not in s: return False return True def _debug_output(s: Any) -> None: """Print a debug message if DEBUG is True. Args: s: The message to print """ if DEBUG: print(s) # noqa: T201 def _get_named_result(connection: Any, query: str) -> List[dict[str, Any]]: """Get a named result from a query. Args: connection: The connection to the database query: The query to execute Returns: List[dict[str, Any]]: The result of the query """ cursor = connection.cursor() cursor.execute(query) columns = cursor.description result = [] for value in cursor.fetchall(): r = {} for idx, datum in enumerate(value): k = columns[idx][0] r[k] = datum result.append(r) _debug_output(result) cursor.close() return result
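
# --- Illustrative usage sketch (not part of the library API) ---
# A hedged end-to-end example of the classes above. It assumes a running
# Apache Doris frontend and an `embeddings` object implementing the
# LangChain `Embeddings` interface; all connection values are placeholders.
#
#     settings = ApacheDorisSettings(
#         host="localhost",
#         port=9030,
#         username="root",
#         password="",
#         database="default",
#         table="langchain",
#     )
#     store = ApacheDoris(embedding=embeddings, config=settings)
#     store.add_texts(["alpha", "beta"], metadatas=[{"tag": "x"}, {"tag": "y"}])
#
#     # `where_str` filters on the metadata column; never build it from
#     # untrusted input (SQL injection risk, as noted in the docstrings).
#     docs = store.similarity_search(
#         "alpha", k=2, where_str=f"{store.metadata_column} LIKE '%x%'"
#     )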
0
lc_public_repos/langchain/libs/community/langchain_community
lc_public_repos/langchain/libs/community/langchain_community/vectorstores/relyt.py
from __future__ import annotations import logging import uuid from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Type from sqlalchemy import Column, String, Table, create_engine, insert, text from sqlalchemy.dialects.postgresql import JSON, TEXT try: from sqlalchemy.orm import declarative_base except ImportError: from sqlalchemy.ext.declarative import declarative_base from langchain_core.documents import Document from langchain_core.embeddings import Embeddings from langchain_core.utils import get_from_dict_or_env from langchain_core.vectorstores import VectorStore _LANGCHAIN_DEFAULT_EMBEDDING_DIM = 1536 _LANGCHAIN_DEFAULT_COLLECTION_NAME = "langchain_document" Base = declarative_base() # type: Any class Relyt(VectorStore): """`Relyt` (distributed PostgreSQL) vector store. Relyt is a distributed full postgresql syntax cloud-native database. - `connection_string` is a postgres connection string. - `embedding_function` any embedding function implementing `langchain.embeddings.base.Embeddings` interface. - `collection_name` is the name of the collection to use. (default: langchain) - NOTE: This is not the name of the table, but the name of the collection. The tables will be created when initializing the store (if not exists) So, make sure the user has the right permissions to create tables. - `pre_delete_collection` if True, will delete the collection if it exists. (default: False) - Useful for testing. """ def __init__( self, connection_string: str, embedding_function: Embeddings, embedding_dimension: int = _LANGCHAIN_DEFAULT_EMBEDDING_DIM, collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME, pre_delete_collection: bool = False, logger: Optional[logging.Logger] = None, engine_args: Optional[dict] = None, ) -> None: """Initialize a PGVecto_rs vectorstore. Args: embedding: Embeddings to use. dimension: Dimension of the embeddings. db_url: Database URL. collection_name: Name of the collection. new_table: Whether to create a new table or connect to an existing one. If true, the table will be dropped if exists, then recreated. Defaults to False. """ try: from pgvecto_rs.sdk import PGVectoRs PGVectoRs( db_url=connection_string, collection_name=collection_name, dimension=embedding_dimension, recreate=pre_delete_collection, ) except ImportError as e: raise ImportError( "Unable to import pgvector_rs.sdk , please install with " '`pip install "pgvecto_rs[sdk]"`.' ) from e self.connection_string = connection_string self.embedding_function = embedding_function self.embedding_dimension = embedding_dimension self.collection_name = collection_name self.pre_delete_collection = pre_delete_collection self.logger = logger or logging.getLogger(__name__) self.__post_init__(engine_args) def __post_init__( self, engine_args: Optional[dict] = None, ) -> None: """ Initialize the store. 
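
        A minimal sketch of how ``engine_args`` flows into SQLAlchemy's
        ``create_engine`` (the URL and pool settings below are placeholder
        assumptions, and ``embeddings`` is a hypothetical Embeddings object):

        .. code-block:: python

            store = Relyt(
                connection_string="postgresql+psycopg2://user:pass@host:5432/db",
                embedding_function=embeddings,
                engine_args={"pool_recycle": 3600, "pool_size": 5},
            )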
""" _engine_args = engine_args or {} if ( "pool_recycle" not in _engine_args ): # Check if pool_recycle is not in _engine_args _engine_args["pool_recycle"] = ( 3600 # Set pool_recycle to 3600s if not present ) self.engine = create_engine(self.connection_string, **_engine_args) self.create_collection() @property def embeddings(self) -> Embeddings: return self.embedding_function def _select_relevance_score_fn(self) -> Callable[[float], float]: return self._euclidean_relevance_score_fn def create_table_if_not_exists(self) -> None: # Define the dynamic table """ Table( self.collection_name, Base.metadata, Column("id", TEXT, primary_key=True, default=uuid.uuid4), Column("embedding", Vector(self.embedding_dimension)), Column("document", String, nullable=True), Column("metadata", JSON, nullable=True), extend_existing=True, ) """ with self.engine.connect() as conn: with conn.begin(): # create vectors conn.execute(text("CREATE EXTENSION IF NOT EXISTS vectors")) conn.execute(text('CREATE EXTENSION IF NOT EXISTS "uuid-ossp"')) # Create the table # Base.metadata.create_all(conn) table_name = f"{self.collection_name}" table_query = text( f""" SELECT 1 FROM pg_class WHERE relname = '{table_name}'; """ ) result = conn.execute(table_query).scalar() if not result: table_statement = text( f""" CREATE TABLE {table_name} ( id TEXT PRIMARY KEY DEFAULT uuid_generate_v4(), embedding vector({self.embedding_dimension}), document TEXT, metadata JSON ) USING heap; """ ) conn.execute(table_statement) # Check if the index exists index_name = f"{self.collection_name}_embedding_idx" index_query = text( f""" SELECT 1 FROM pg_indexes WHERE indexname = '{index_name}'; """ ) result = conn.execute(index_query).scalar() # Create the index if it doesn't exist if not result: index_statement = text( f""" CREATE INDEX {index_name} ON {self.collection_name} USING vectors (embedding vector_l2_ops) WITH (options = $$ optimizing.optimizing_threads = 30 segment.max_growing_segment_size = 600 segment.max_sealed_segment_size = 30000000 [indexing.hnsw] m=30 ef_construction=500 $$); """ ) conn.execute(index_statement) def create_collection(self) -> None: if self.pre_delete_collection: self.delete_collection() self.create_table_if_not_exists() def delete_collection(self) -> None: self.logger.debug("Trying to delete collection") drop_statement = text(f"DROP TABLE IF EXISTS {self.collection_name};") with self.engine.connect() as conn: with conn.begin(): conn.execute(drop_statement) def add_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, batch_size: int = 500, **kwargs: Any, ) -> List[str]: """Run more texts through the embeddings and add to the vectorstore. Args: texts: Iterable of strings to add to the vectorstore. metadatas: Optional list of metadatas associated with the texts. kwargs: vectorstore specific parameters Returns: List of ids from adding the texts into the vectorstore. 
""" from pgvecto_rs.sqlalchemy import Vector if ids is None: ids = [str(uuid.uuid1()) for _ in texts] embeddings = self.embedding_function.embed_documents(list(texts)) if not metadatas: metadatas = [{} for _ in texts] # Define the table schema chunks_table = Table( self.collection_name, Base.metadata, Column("id", TEXT, primary_key=True), Column("embedding", Vector(self.embedding_dimension)), Column("document", String, nullable=True), Column("metadata", JSON, nullable=True), extend_existing=True, ) chunks_table_data = [] with self.engine.connect() as conn: with conn.begin(): for document, metadata, chunk_id, embedding in zip( texts, metadatas, ids, embeddings ): chunks_table_data.append( { "id": chunk_id, "embedding": embedding, "document": document, "metadata": metadata, } ) # Execute the batch insert when the batch size is reached if len(chunks_table_data) == batch_size: conn.execute(insert(chunks_table).values(chunks_table_data)) # Clear the chunks_table_data list for the next batch chunks_table_data.clear() # Insert any remaining records that didn't make up a full batch if chunks_table_data: conn.execute(insert(chunks_table).values(chunks_table_data)) return ids def similarity_search( self, query: str, k: int = 4, filter: Optional[dict] = None, **kwargs: Any, ) -> List[Document]: """Run similarity search with AnalyticDB with distance. Args: query (str): Query text to search for. k (int): Number of results to return. Defaults to 4. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. Returns: List of Documents most similar to the query. """ embedding = self.embedding_function.embed_query(text=query) return self.similarity_search_by_vector( embedding=embedding, k=k, filter=filter, ) def similarity_search_with_score( self, query: str, k: int = 4, filter: Optional[dict] = None, ) -> List[Tuple[Document, float]]: """Return docs most similar to query. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. Returns: List of Documents most similar to the query and score for each """ embedding = self.embedding_function.embed_query(query) docs = self.similarity_search_with_score_by_vector( embedding=embedding, k=k, filter=filter ) return docs def similarity_search_with_score_by_vector( self, embedding: List[float], k: int = 4, filter: Optional[dict] = None, ) -> List[Tuple[Document, float]]: # Add the filter if provided try: from sqlalchemy.engine import Row except ImportError: raise ImportError( "Could not import Row from sqlalchemy.engine. " "Please 'pip install sqlalchemy>=1.4'." 
) filter_condition = "" if filter is not None: conditions = [ f"metadata->>{key!r} = {value!r}" for key, value in filter.items() ] filter_condition = f"WHERE {' AND '.join(conditions)}" # Define the base query sql_query = f""" set vectors.enable_search_growing = on; set vectors.enable_search_write = on; SELECT document, metadata, embedding <-> :embedding as distance FROM {self.collection_name} {filter_condition} ORDER BY embedding <-> :embedding LIMIT :k """ # Set up the query parameters embedding_str = ", ".join(format(x) for x in embedding) embedding_str = "[" + embedding_str + "]" params = {"embedding": embedding_str, "k": k} # Execute the query and fetch the results with self.engine.connect() as conn: results: Sequence[Row] = conn.execute(text(sql_query), params).fetchall() documents_with_scores = [ ( Document( page_content=result.document, metadata=result.metadata, ), result.distance if self.embedding_function is not None else None, ) for result in results ] return documents_with_scores def similarity_search_by_vector( self, embedding: List[float], k: int = 4, filter: Optional[dict] = None, **kwargs: Any, ) -> List[Document]: """Return docs most similar to embedding vector. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. Returns: List of Documents most similar to the query vector. """ docs_and_scores = self.similarity_search_with_score_by_vector( embedding=embedding, k=k, filter=filter ) return [doc for doc, _ in docs_and_scores] def delete(self, ids: Optional[List[str]] = None, **kwargs: Any) -> Optional[bool]: """Delete by vector IDs. Args: ids: List of ids to delete. """ from pgvecto_rs.sqlalchemy import Vector if ids is None: raise ValueError("No ids provided to delete.") # Define the table schema chunks_table = Table( self.collection_name, Base.metadata, Column("id", TEXT, primary_key=True), Column("embedding", Vector(self.embedding_dimension)), Column("document", String, nullable=True), Column("metadata", JSON, nullable=True), extend_existing=True, ) try: with self.engine.connect() as conn: with conn.begin(): delete_condition = chunks_table.c.id.in_(ids) conn.execute(chunks_table.delete().where(delete_condition)) return True except Exception as e: print("Delete operation failed:", str(e)) # noqa: T201 return False @classmethod def from_texts( cls: Type[Relyt], texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, embedding_dimension: int = _LANGCHAIN_DEFAULT_EMBEDDING_DIM, collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME, ids: Optional[List[str]] = None, pre_delete_collection: bool = False, engine_args: Optional[dict] = None, **kwargs: Any, ) -> Relyt: """ Return VectorStore initialized from texts and embeddings. Postgres Connection string is required Either pass it as a parameter or set the PG_CONNECTION_STRING environment variable. 
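
        Example:
            A minimal sketch; the connection string is a placeholder and
            ``embeddings`` is a hypothetical Embeddings object (alternatively,
            set PG_CONNECTION_STRING instead of passing it explicitly):

            .. code-block:: python

                store = Relyt.from_texts(
                    texts=["alpha", "beta"],
                    embedding=embeddings,
                    connection_string="postgresql+psycopg2://user:pass@host:5432/db",
                )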
""" connection_string = cls.get_connection_string(kwargs) store = cls( connection_string=connection_string, collection_name=collection_name, embedding_function=embedding, embedding_dimension=embedding_dimension, pre_delete_collection=pre_delete_collection, engine_args=engine_args, ) store.add_texts(texts=texts, metadatas=metadatas, ids=ids, **kwargs) return store @classmethod def get_connection_string(cls, kwargs: Dict[str, Any]) -> str: connection_string: str = get_from_dict_or_env( data=kwargs, key="connection_string", env_key="PG_CONNECTION_STRING", ) if not connection_string: raise ValueError( "Postgres connection string is required" "Either pass it as a parameter" "or set the PG_CONNECTION_STRING environment variable." ) return connection_string @classmethod def from_documents( cls: Type[Relyt], documents: List[Document], embedding: Embeddings, embedding_dimension: int = _LANGCHAIN_DEFAULT_EMBEDDING_DIM, collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME, ids: Optional[List[str]] = None, pre_delete_collection: bool = False, engine_args: Optional[dict] = None, **kwargs: Any, ) -> Relyt: """ Return VectorStore initialized from documents and embeddings. Postgres Connection string is required Either pass it as a parameter or set the PG_CONNECTION_STRING environment variable. """ texts = [d.page_content for d in documents] metadatas = [d.metadata for d in documents] connection_string = cls.get_connection_string(kwargs) kwargs["connection_string"] = connection_string return cls.from_texts( texts=texts, pre_delete_collection=pre_delete_collection, embedding=embedding, embedding_dimension=embedding_dimension, metadatas=metadatas, ids=ids, collection_name=collection_name, engine_args=engine_args, **kwargs, ) @classmethod def connection_string_from_db_params( cls, driver: str, host: str, port: int, database: str, user: str, password: str, ) -> str: """Return connection string from database parameters.""" return f"postgresql+{driver}://{user}:{password}@{host}:{port}/{database}"
0
lc_public_repos/langchain/libs/community/langchain_community
lc_public_repos/langchain/libs/community/langchain_community/vectorstores/vdms.py
from __future__ import annotations import base64 import logging import os import uuid from copy import deepcopy from typing import ( TYPE_CHECKING, Any, Callable, Dict, Iterable, List, Literal, Optional, Sized, Tuple, Type, Union, get_args, ) import numpy as np from langchain_core.documents import Document from langchain_core.embeddings import Embeddings from langchain_core.vectorstores import VectorStore from langchain_community.vectorstores.utils import maximal_marginal_relevance if TYPE_CHECKING: import vdms DISTANCE_METRICS = Literal[ "L2", # Euclidean Distance "IP", # Inner Product ] AVAILABLE_DISTANCE_METRICS: List[DISTANCE_METRICS] = list(get_args(DISTANCE_METRICS)) ENGINES = Literal[ "TileDBDense", # TileDB Dense "TileDBSparse", # TileDB Sparse "FaissFlat", # FAISS IndexFlat "FaissIVFFlat", # FAISS IndexIVFFlat "Flinng", # FLINNG ] AVAILABLE_ENGINES: List[ENGINES] = list(get_args(ENGINES)) DEFAULT_COLLECTION_NAME = "langchain" DEFAULT_INSERT_BATCH_SIZE = 32 # Number of Documents to return. DEFAULT_K = 3 # Number of Documents to fetch to pass to knn when filters applied. DEFAULT_FETCH_K = DEFAULT_K * 5 DEFAULT_PROPERTIES = ["_distance", "id", "content"] INVALID_DOC_METADATA_KEYS = ["_distance", "content", "blob"] INVALID_METADATA_VALUE = ["Missing property", None, {}] # type: List logger = logging.getLogger(__name__) def _len_check_if_sized(x: Any, y: Any, x_name: str, y_name: str) -> None: """ Check that sizes of two variables are the same Args: x: Variable to compare y: Variable to compare x_name: Name for variable x y_name: Name for variable y """ if isinstance(x, Sized) and isinstance(y, Sized) and len(x) != len(y): raise ValueError( f"{x_name} and {y_name} expected to be equal length but " f"len({x_name})={len(x)} and len({y_name})={len(y)}" ) return def _results_to_docs(results: Any) -> List[Document]: return [doc for doc, _ in _results_to_docs_and_scores(results)] def _results_to_docs_and_scores(results: Any) -> List[Tuple[Document, float]]: final_res: List[Any] = [] try: responses, blobs = results[0] if ( len(responses) > 0 and "FindDescriptor" in responses[0] and "entities" in responses[0]["FindDescriptor"] ): result_entities = responses[0]["FindDescriptor"]["entities"] # result_blobs = blobs for ent in result_entities: distance = round(ent["_distance"], 10) txt_contents = ent["content"] for p in INVALID_DOC_METADATA_KEYS: if p in ent: del ent[p] props = { mkey: mval for mkey, mval in ent.items() if mval not in INVALID_METADATA_VALUE } final_res.append( (Document(page_content=txt_contents, metadata=props), distance) ) except Exception as e: logger.warning(f"No results returned. Error while parsing results: {e}") return final_res def VDMS_Client(host: str = "localhost", port: int = 55555) -> vdms.vdms: """VDMS client for the VDMS server. Args: host: IP or hostname of VDMS server port: Port to connect to VDMS server """ try: import vdms except ImportError: raise ImportError( "Could not import vdms python package. " "Please install it with `pip install vdms." ) client = vdms.vdms() client.connect(host, port) return client class VDMS(VectorStore): """Intel Lab's VDMS for vector-store workloads. To use, you should have both: - the ``vdms`` python package installed - a host (str) and port (int) associated with a deployed VDMS Server Visit https://github.com/IntelLabs/vdms/wiki more information. IT IS HIGHLY SUGGESTED TO NORMALIZE YOUR DATA. 
Args: client: VDMS Client used to connect to VDMS server collection_name: Name of data collection [Default: langchain] distance_strategy: Method used to calculate distances. VDMS supports "L2" (euclidean distance) or "IP" (inner product) [Default: L2] engine: Underlying implementation for indexing and computing distances. VDMS supports TileDBDense, TileDBSparse, FaissFlat, FaissIVFFlat, and Flinng [Default: FaissFlat] embedding: Any embedding function implementing `langchain_core.embeddings.Embeddings` interface. relevance_score_fn: Function for obtaining relevance score Example: .. code-block:: python from langchain_huggingface import HuggingFaceEmbeddings from langchain_community.vectorstores.vdms import VDMS, VDMS_Client model_name = "sentence-transformers/all-mpnet-base-v2" vectorstore = VDMS( client=VDMS_Client("localhost", 55555), embedding=HuggingFaceEmbeddings(model_name=model_name), collection_name="langchain-demo", distance_strategy="L2", engine="FaissFlat", ) """ def __init__( self, client: vdms.vdms, *, embedding: Optional[Embeddings] = None, collection_name: str = DEFAULT_COLLECTION_NAME, # DescriptorSet name distance_strategy: DISTANCE_METRICS = "L2", engine: ENGINES = "FaissFlat", relevance_score_fn: Optional[Callable[[float], float]] = None, embedding_dimensions: Optional[int] = None, ) -> None: # Check required parameters self._client = client self.similarity_search_engine = engine self.distance_strategy = distance_strategy self.embedding = embedding self._check_required_inputs(collection_name, embedding_dimensions) # Update other parameters self.override_relevance_score_fn = relevance_score_fn # Initialize collection self._collection_name = self.add_set( collection_name, engine=self.similarity_search_engine, metric=self.distance_strategy, ) @property def embeddings(self) -> Optional[Embeddings]: return self.embedding def _embed_documents(self, texts: List[str]) -> List[List[float]]: if isinstance(self.embedding, Embeddings): return self.embedding.embed_documents(texts) else: p_str = "Must provide `embedding` which is expected" p_str += " to be an Embeddings object" raise ValueError(p_str) def _embed_video(self, paths: List[str], **kwargs: Any) -> List[List[float]]: if self.embedding is not None and hasattr(self.embedding, "embed_video"): return self.embedding.embed_video(paths=paths, **kwargs) else: raise ValueError( "Must provide `embedding` which has attribute `embed_video`" ) def _embed_image(self, uris: List[str]) -> List[List[float]]: if self.embedding is not None and hasattr(self.embedding, "embed_image"): return self.embedding.embed_image(uris=uris) else: raise ValueError( "Must provide `embedding` which has attribute `embed_image`" ) def _embed_query(self, text: str) -> List[float]: if isinstance(self.embedding, Embeddings): return self.embedding.embed_query(text) else: raise ValueError( "Must provide `embedding` which is expected" " to be an Embeddings object" ) def _select_relevance_score_fn(self) -> Callable[[float], float]: """ The 'correct' relevance function may differ depending on a few things, including: - the distance / similarity metric used by the VectorStore - the scale of your embeddings (OpenAI's are unit normed. Many others are not!) - embedding dimensionality - etc. 
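
        A minimal sketch of bypassing this selection by supplying your own
        normalization at construction time (the lambda is a placeholder and
        ``embeddings`` is a hypothetical Embeddings object):

        .. code-block:: python

            store = VDMS(
                client=VDMS_Client("localhost", 55555),
                embedding=embeddings,
                relevance_score_fn=lambda distance: 1.0 - distance,
            )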
""" if self.override_relevance_score_fn is not None: return self.override_relevance_score_fn # Default strategy is to rely on distance strategy provided # in vectorstore constructor if self.distance_strategy.lower() in ["ip", "l2"]: return lambda x: x else: raise ValueError( "No supported normalization function" f" for distance_strategy of {self.distance_strategy}." "Consider providing relevance_score_fn to VDMS constructor." ) def _similarity_search_with_relevance_scores( self, query: str, k: int = DEFAULT_K, fetch_k: int = DEFAULT_FETCH_K, filter: Optional[Dict[str, Any]] = None, **kwargs: Any, ) -> List[Tuple[Document, float]]: """Return docs and their similarity scores on a scale from 0 to 1.""" if self.override_relevance_score_fn is None: kwargs["normalize_distance"] = True docs_and_scores = self.similarity_search_with_score( query=query, k=k, fetch_k=fetch_k, filter=filter, **kwargs, ) docs_and_rel_scores: List[Any] = [] for doc, score in docs_and_scores: if self.override_relevance_score_fn is None: docs_and_rel_scores.append((doc, score)) else: docs_and_rel_scores.append( (doc, self.override_relevance_score_fn(score)) ) return docs_and_rel_scores def add( self, collection_name: str, texts: List[str], embeddings: List[List[float]], metadatas: Optional[Union[List[None], List[Dict[str, Any]]]] = None, ids: Optional[List[str]] = None, ) -> List: _len_check_if_sized(texts, embeddings, "texts", "embeddings") metadatas = metadatas if metadatas is not None else [None for _ in texts] _len_check_if_sized(texts, metadatas, "texts", "metadatas") ids = ids if ids is not None else [str(uuid.uuid4()) for _ in texts] _len_check_if_sized(texts, ids, "texts", "ids") all_queries: List[Any] = [] all_blobs: List[Any] = [] inserted_ids: List[Any] = [] for meta, emb, doc, id in zip(metadatas, embeddings, texts, ids): query, blob = self.__get_add_query( collection_name, metadata=meta, embedding=emb, document=doc, id=id ) if blob is not None: all_queries.append(query) all_blobs.append(blob) inserted_ids.append(id) response, response_array = self.__run_vdms_query(all_queries, all_blobs) return inserted_ids def add_set( self, collection_name: str, engine: ENGINES = "FaissFlat", metric: DISTANCE_METRICS = "L2", ) -> str: query = _add_descriptorset( "AddDescriptorSet", collection_name, self.embedding_dimension, engine=getattr(engine, "value", engine), metric=getattr(metric, "value", metric), ) response, _ = self.__run_vdms_query([query]) if "FailedCommand" in response[0]: raise ValueError(f"Failed to add collection {collection_name}") return collection_name def __delete( self, collection_name: str, ids: Union[None, List[str]] = None, constraints: Union[None, Dict[str, Any]] = None, ) -> bool: """ Deletes entire collection if id is not provided """ all_queries: List[Any] = [] all_blobs: List[Any] = [] collection_properties = self.__get_properties(collection_name) results = {"list": collection_properties} if constraints is None: constraints = {"_deletion": ["==", 1]} else: constraints["_deletion"] = ["==", 1] if ids is not None: constraints["id"] = ["==", ids[0]] # if len(ids) > 1 else ids[0]] query = _add_descriptor( "FindDescriptor", collection_name, label=None, ref=None, props=None, link=None, k_neighbors=None, constraints=constraints, results=results, ) all_queries.append(query) response, response_array = self.__run_vdms_query(all_queries, all_blobs) # Update/store indices after deletion query = _add_descriptorset( "FindDescriptorSet", collection_name, storeIndex=True ) responseSet, _ = 
self.__run_vdms_query([query], all_blobs) return "FindDescriptor" in response[0] def __get_add_query( self, collection_name: str, metadata: Optional[Any] = None, embedding: Union[List[float], None] = None, document: Optional[Any] = None, id: Optional[str] = None, ) -> Tuple[Dict[str, Dict[str, Any]], Union[bytes, None]]: if id is None: props: Dict[str, Any] = {} else: props = {"id": id} id_exists, query = _check_descriptor_exists_by_id( self._client, collection_name, id ) if id_exists: skipped_value = { prop_key: prop_val[-1] for prop_key, prop_val in query["FindDescriptor"][ "constraints" ].items() } pstr = f"[!] Embedding with id ({id}) exists in DB;" pstr += "Therefore, skipped and not inserted" print(pstr) # noqa: T201 print(f"\tSkipped values are: {skipped_value}") # noqa: T201 return query, None if metadata: props.update(metadata) if document not in [None, ""]: props["content"] = document for k in props.keys(): if k not in self.collection_properties: self.collection_properties.append(k) query = _add_descriptor( "AddDescriptor", collection_name, label=None, ref=None, props=props, link=None, k_neighbors=None, constraints=None, results=None, ) blob = embedding2bytes(embedding) return ( query, blob, ) def __get_properties( self, collection_name: str, unique_entity: Optional[bool] = False, deletion: Optional[bool] = False, ) -> List[str]: find_query = _find_property_entity( collection_name, unique_entity=unique_entity, deletion=deletion ) response, response_blob = self.__run_vdms_query([find_query]) if len(response_blob) > 0: collection_properties = _bytes2str(response_blob[0]).split(",") else: collection_properties = deepcopy(DEFAULT_PROPERTIES) return collection_properties def __run_vdms_query( self, all_queries: List[Dict], all_blobs: Optional[List] = [], print_last_response: Optional[bool] = False, ) -> Tuple[Any, Any]: response, response_array = self._client.query(all_queries, all_blobs) _ = _check_valid_response(all_queries, response) if print_last_response: self._client.print_last_response() return response, response_array def __update( self, collection_name: str, ids: List[str], documents: List[str], embeddings: List[List[float]], metadatas: Optional[Union[List[None], List[Dict[str, Any]]]] = None, ) -> None: """ Updates (find, delete, add) a collection based on id. 
If more than one collection returned with id, error occuers """ _len_check_if_sized(ids, documents, "ids", "documents") _len_check_if_sized(ids, embeddings, "ids", "embeddings") metadatas = metadatas if metadatas is not None else [None for _ in ids] _len_check_if_sized(ids, metadatas, "ids", "metadatas") orig_props = self.__get_properties(collection_name) updated_ids: List[Any] = [] for meta, emb, doc, id in zip(metadatas, embeddings, documents, ids): results = {"list": self.collection_properties} constraints = {"_deletion": ["==", 1]} if id is not None: constraints["id"] = ["==", id] query = _add_descriptor( "FindDescriptor", collection_name, label=None, ref=None, props=None, link=None, k_neighbors=None, constraints=constraints, results=results, ) response, response_array = self.__run_vdms_query([query]) query, blob = self.__get_add_query( collection_name, metadata=meta, embedding=emb, document=doc, id=id, ) if blob is not None: response, response_array = self.__run_vdms_query([query], [blob]) updated_ids.append(id) self.__update_properties( collection_name, orig_props, self.collection_properties ) def __update_properties( self, collection_name: str, current_collection_properties: List, new_collection_properties: Optional[List], ) -> None: if new_collection_properties is not None: old_collection_properties = deepcopy(current_collection_properties) for prop in new_collection_properties: if prop not in current_collection_properties: current_collection_properties.append(prop) if current_collection_properties != old_collection_properties: all_queries, blob_arr = _build_property_query( collection_name, command_type="update", all_properties=current_collection_properties, ) response, _ = self.__run_vdms_query(all_queries, [blob_arr]) def add_images( self, uris: List[str], metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, batch_size: int = DEFAULT_INSERT_BATCH_SIZE, add_path: Optional[bool] = True, **kwargs: Any, ) -> List[str]: """Run more images through the embeddings and add to the vectorstore. Images are added as embeddings (AddDescriptor) instead of separate entity (AddImage) within VDMS to leverage similarity search capability Args: uris: List of paths to the images to add to the vectorstore. metadatas: Optional list of metadatas associated with the images. ids: Optional list of unique IDs. batch_size (int): Number of concurrent requests to send to the server. add_path: Bool to add image path as metadata Returns: List of ids from adding images into the vectorstore. """ # Map from uris to blobs to base64 b64_texts = [self.encode_image(image_path=uri) for uri in uris] if add_path and metadatas: for midx, uri in enumerate(uris): metadatas[midx]["image_path"] = uri elif add_path: metadatas = [] for uri in uris: metadatas.append({"image_path": uri}) # Populate IDs ids = ids if ids is not None else [str(uuid.uuid4()) for _ in uris] # Set embeddings embeddings = self._embed_image(uris=uris) if metadatas is None: metadatas = [{} for _ in uris] else: metadatas = [_validate_vdms_properties(m) for m in metadatas] self.add_from( texts=b64_texts, embeddings=embeddings, ids=ids, metadatas=metadatas, batch_size=batch_size, **kwargs, ) return ids def add_videos( self, paths: List[str], texts: Optional[List[str]] = None, metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, batch_size: int = 1, add_path: Optional[bool] = True, **kwargs: Any, ) -> List[str]: """Run videos through the embeddings and add to the vectorstore. 
Videos are added as embeddings (AddDescriptor) instead of separate entity (AddVideo) within VDMS to leverage similarity search capability Args: paths: List of paths to the videos to add to the vectorstore. metadatas: Optional list of text associated with the videos. metadatas: Optional list of metadatas associated with the videos. ids: Optional list of unique IDs. batch_size (int): Number of concurrent requests to send to the server. add_path: Bool to add video path as metadata Returns: List of ids from adding videos into the vectorstore. """ if texts is None: texts = ["" for _ in paths] if add_path and metadatas: for midx, path in enumerate(paths): metadatas[midx]["video_path"] = path elif add_path: metadatas = [] for path in paths: metadatas.append({"video_path": path}) # Populate IDs ids = ids if ids is not None else [str(uuid.uuid4()) for _ in paths] # Set embeddings embeddings = self._embed_video(paths=paths, **kwargs) if metadatas is None: metadatas = [{} for _ in paths] self.add_from( texts=texts, embeddings=embeddings, ids=ids, metadatas=metadatas, batch_size=batch_size, **kwargs, ) return ids def add_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, batch_size: int = DEFAULT_INSERT_BATCH_SIZE, **kwargs: Any, ) -> List[str]: """Run more texts through the embeddings and add to the vectorstore. Args: texts: List of strings to add to the vectorstore. metadatas: Optional list of metadatas associated with the texts. ids: Optional list of unique IDs. batch_size (int): Number of concurrent requests to send to the server. Returns: List of ids from adding the texts into the vectorstore. """ texts = list(texts) if ids is None: ids = [str(uuid.uuid4()) for _ in texts] embeddings = self._embed_documents(texts) if metadatas is None: metadatas = [{} for _ in texts] else: metadatas = [_validate_vdms_properties(m) for m in metadatas] inserted_ids = self.add_from( texts=texts, embeddings=embeddings, ids=ids, metadatas=metadatas, batch_size=batch_size, **kwargs, ) return inserted_ids def add_from( self, texts: List[str], embeddings: List[List[float]], ids: List[str], metadatas: Optional[List[dict]] = None, batch_size: int = DEFAULT_INSERT_BATCH_SIZE, **kwargs: Any, ) -> List[str]: # Get initial properties orig_props = self.__get_properties(self._collection_name) inserted_ids: List[str] = [] for start_idx in range(0, len(texts), batch_size): end_idx = min(start_idx + batch_size, len(texts)) batch_texts = texts[start_idx:end_idx] batch_embedding_vectors = embeddings[start_idx:end_idx] batch_ids = ids[start_idx:end_idx] if metadatas: batch_metadatas = metadatas[start_idx:end_idx] result = self.add( self._collection_name, embeddings=batch_embedding_vectors, texts=batch_texts, metadatas=batch_metadatas, ids=batch_ids, ) inserted_ids.extend(result) # Update Properties self.__update_properties( self._collection_name, orig_props, self.collection_properties ) return inserted_ids def _check_required_inputs( self, collection_name: str, embedding_dimensions: Union[int, None] ) -> None: # Check connection to client if not self._client.is_connected(): raise ValueError( "VDMS client must be connected to a VDMS server." 
+ "Please use VDMS_Client to establish a connection" ) # Check Distance Metric if self.distance_strategy not in AVAILABLE_DISTANCE_METRICS: raise ValueError("distance_strategy must be either 'L2' or 'IP'") # Check Engines if self.similarity_search_engine not in AVAILABLE_ENGINES: raise ValueError( "engine must be either 'TileDBDense', 'TileDBSparse', " + "'FaissFlat', 'FaissIVFFlat', or 'Flinng'" ) # Check Embedding Func is provided and store dimension size if self.embedding is None: raise ValueError("Must provide embedding function") if embedding_dimensions is not None: self.embedding_dimension = embedding_dimensions elif self.embedding is not None and hasattr(self.embedding, "embed_query"): self.embedding_dimension = len( self._embed_query("This is a sample sentence.") ) elif self.embedding is not None and ( hasattr(self.embedding, "embed_image") or hasattr(self.embedding, "embed_video") ): if hasattr(self.embedding, "model"): try: self.embedding_dimension = ( self.embedding.model.token_embedding.embedding_dim ) except ValueError: raise ValueError( "Embedding dimension needed. Please define embedding_dimensions" ) else: raise ValueError( "Embedding dimension needed. Please define embedding_dimensions" ) # Check for properties current_props = self.__get_properties(collection_name) if hasattr(self, "collection_properties"): self.collection_properties.extend(current_props) else: self.collection_properties: List[str] = current_props def count(self, collection_name: str) -> int: all_queries: List[Any] = [] all_blobs: List[Any] = [] results = {"count": "", "list": ["id"]} # collection_properties} query = _add_descriptor( "FindDescriptor", collection_name, label=None, ref=None, props=None, link=None, k_neighbors=None, constraints=None, results=results, ) all_queries.append(query) response, response_array = self.__run_vdms_query(all_queries, all_blobs) return response[0]["FindDescriptor"]["returned"] def decode_image(self, base64_image: str) -> bytes: return base64.b64decode(base64_image) def delete( self, ids: Optional[List[str]] = None, collection_name: Optional[str] = None, constraints: Optional[Dict] = None, **kwargs: Any, ) -> bool: """Delete by ID. These are the IDs in the vectorstore. Args: ids: List of ids to delete. Returns: Optional[bool]: True if deletion is successful, False otherwise, None if not implemented. 
""" name = collection_name if collection_name is not None else self._collection_name return self.__delete(name, ids=ids, constraints=constraints) def get_k_candidates( self, setname: str, fetch_k: Optional[int], results: Optional[Dict[str, Any]] = None, all_blobs: Optional[List] = None, normalize: Optional[bool] = False, ) -> Tuple[List[Dict[str, Any]], List, float]: max_dist = 1 command_str = "FindDescriptor" query = _add_descriptor( command_str, setname, k_neighbors=fetch_k, results=results, ) response, response_array = self.__run_vdms_query([query], all_blobs) if normalize and command_str in response[0]: max_dist = response[0][command_str]["entities"][-1]["_distance"] return response, response_array, max_dist def get_descriptor_response( self, command_str: str, setname: str, k_neighbors: int = DEFAULT_K, fetch_k: int = DEFAULT_FETCH_K, constraints: Optional[dict] = None, results: Optional[Dict[str, Any]] = None, query_embedding: Optional[List[float]] = None, normalize_distance: bool = False, ) -> Tuple[List[Dict[str, Any]], List]: all_blobs: List[Any] = [] blob = embedding2bytes(query_embedding) if blob is not None: all_blobs.append(blob) if constraints is None: # K results returned response, response_array, max_dist = self.get_k_candidates( setname, k_neighbors, results, all_blobs, normalize=normalize_distance ) else: if results is None: results = {"list": ["id"]} elif "list" not in results: results["list"] = ["id"] elif "id" not in results["list"]: results["list"].append("id") # (1) Find docs satisfy constraints query = _add_descriptor( command_str, setname, constraints=constraints, results=results, ) response, response_array = self.__run_vdms_query([query]) if command_str in response[0] and response[0][command_str]["returned"] > 0: ids_of_interest = [ ent["id"] for ent in response[0][command_str]["entities"] ] else: return [], [] # (2) Find top fetch_k results response, response_array, max_dist = self.get_k_candidates( setname, fetch_k, results, all_blobs, normalize=normalize_distance ) if command_str not in response[0] or ( command_str in response[0] and response[0][command_str]["returned"] == 0 ): return [], [] # (3) Intersection of (1) & (2) using ids new_entities: List[Dict] = [] for ent in response[0][command_str]["entities"]: if ent["id"] in ids_of_interest: new_entities.append(ent) if len(new_entities) == k_neighbors: break response[0][command_str]["entities"] = new_entities response[0][command_str]["returned"] = len(new_entities) if len(new_entities) < k_neighbors: p_str = "Returned items < k_neighbors; Try increasing fetch_k" print(p_str) # noqa: T201 if normalize_distance: max_dist = 1.0 if max_dist in [0, np.inf] else max_dist for ent_idx, ent in enumerate(response[0][command_str]["entities"]): ent["_distance"] = ent["_distance"] / max_dist response[0][command_str]["entities"][ent_idx]["_distance"] = ent[ "_distance" ] return response, response_array def encode_image(self, image_path: str) -> str: with open(image_path, "rb") as f: blob = f.read() return base64.b64encode(blob).decode("utf-8") @classmethod def from_documents( cls: Type[VDMS], documents: List[Document], embedding: Optional[Embeddings] = None, ids: Optional[List[str]] = None, batch_size: int = DEFAULT_INSERT_BATCH_SIZE, collection_name: str = DEFAULT_COLLECTION_NAME, # Add this line **kwargs: Any, ) -> VDMS: """Create a VDMS vectorstore from a list of documents. Args: collection_name (str): Name of the collection to create. documents (List[Document]): List of documents to add to vectorstore. 
embedding (Embeddings): Embedding function. Defaults to None. ids (Optional[List[str]]): List of document IDs. Defaults to None. batch_size (int): Number of concurrent requests to send to the server. Returns: VDMS: VDMS vectorstore. """ client: vdms.vdms = kwargs["client"] return cls.from_texts( client=client, texts=[doc.page_content for doc in documents], metadatas=[doc.metadata for doc in documents], embedding=embedding, ids=ids, batch_size=batch_size, collection_name=collection_name, # **kwargs, ) @classmethod def from_texts( cls: Type[VDMS], texts: List[str], embedding: Optional[Embeddings] = None, metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, batch_size: int = DEFAULT_INSERT_BATCH_SIZE, collection_name: str = DEFAULT_COLLECTION_NAME, **kwargs: Any, ) -> VDMS: """Create a VDMS vectorstore from a raw documents. Args: texts (List[str]): List of texts to add to the collection. embedding (Embeddings): Embedding function. Defaults to None. metadatas (Optional[List[dict]]): List of metadatas. Defaults to None. ids (Optional[List[str]]): List of document IDs. Defaults to None. batch_size (int): Number of concurrent requests to send to the server. collection_name (str): Name of the collection to create. Returns: VDMS: VDMS vectorstore. """ client: vdms.vdms = kwargs["client"] vdms_collection = cls( collection_name=collection_name, embedding=embedding, client=client, # **kwargs, ) if ids is None: ids = [str(uuid.uuid4()) for _ in texts] vdms_collection.add_texts( texts=texts, metadatas=metadatas, ids=ids, batch_size=batch_size, # **kwargs ) return vdms_collection def get( self, collection_name: str, constraints: Optional[Dict] = None, limit: Optional[int] = None, include: List[str] = ["metadata"], ) -> Tuple[Any, Any]: """Gets the collection. Get embeddings and their associated data from the data store. If no constraints provided returns all embeddings up to limit. Args: constraints: A dict used to filter results by. E.g. `{"color" : ["==", "red"], "price": [">", 4.00]}`. Optional. limit: The number of documents to return. Optional. include: A list of what to include in the results. Can contain `"embeddings"`, `"metadatas"`, `"documents"`. Ids are always included. Defaults to `["metadatas", "documents"]`. Optional. """ all_queries: List[Any] = [] all_blobs: List[Any] = [] results: Dict[str, Any] = {"count": ""} if limit is not None: results["limit"] = limit # Include metadata if "metadata" in include: collection_properties = self.__get_properties(collection_name) results["list"] = collection_properties # Include embedding if "embeddings" in include: results["blob"] = True query = _add_descriptor( "FindDescriptor", collection_name, k_neighbors=None, constraints=constraints, results=results, ) all_queries.append(query) response, response_array = self.__run_vdms_query(all_queries, all_blobs) return response, response_array def max_marginal_relevance_search( self, query: str, k: int = DEFAULT_K, fetch_k: int = DEFAULT_FETCH_K, lambda_mult: float = 0.5, filter: Optional[Dict[str, List]] = None, **kwargs: Any, ) -> List[Document]: """Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: query (str): Query to look up. Text or path for image or video. k: Number of Documents to return. Defaults to 4. fetch_k: Number of Documents to fetch to pass to MMR algorithm. 
lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. Returns: List of Documents selected by maximal marginal relevance. """ if self.embedding is None: raise ValueError( "For MMR search, you must specify an embedding function on" "creation." ) # embedding_vector: List[float] = self._embed_query(query) embedding_vector: List[float] if not os.path.isfile(query) and hasattr(self.embedding, "embed_query"): embedding_vector = self._embed_query(query) elif os.path.isfile(query) and hasattr(self.embedding, "embed_image"): embedding_vector = self._embed_image(uris=[query])[0] elif os.path.isfile(query) and hasattr(self.embedding, "embed_video"): embedding_vector = self._embed_video(paths=[query])[0] else: error_msg = f"Could not generate embedding for query '{query}'." error_msg += "If using path for image or video, verify embedding model " error_msg += "has callable functions 'embed_image' or 'embed_video'." raise ValueError(error_msg) docs = self.max_marginal_relevance_search_by_vector( embedding_vector, k, fetch_k, lambda_mult=lambda_mult, filter=filter, ) return docs def max_marginal_relevance_search_by_vector( self, embedding: List[float], k: int = DEFAULT_K, fetch_k: int = DEFAULT_FETCH_K, lambda_mult: float = 0.5, filter: Optional[Dict[str, List]] = None, **kwargs: Any, ) -> List[Document]: """Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. fetch_k: Number of Documents to fetch to pass to MMR algorithm. lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. Returns: List of Documents selected by maximal marginal relevance. """ results = self.query_collection_embeddings( query_embeddings=[embedding], n_results=fetch_k, filter=filter, include=["metadatas", "documents", "distances", "embeddings"], ) if len(results[0][1]) == 0: # No results returned return [] else: embedding_list = [ list(_bytes2embedding(result)) for result in results[0][1] ] mmr_selected = maximal_marginal_relevance( np.array(embedding, dtype=np.float32), embedding_list, k=k, lambda_mult=lambda_mult, ) candidates = _results_to_docs(results) selected_results = [ r for i, r in enumerate(candidates) if i in mmr_selected ] return selected_results def max_marginal_relevance_search_with_score( self, query: str, k: int = DEFAULT_K, fetch_k: int = DEFAULT_FETCH_K, lambda_mult: float = 0.5, filter: Optional[Dict[str, List]] = None, **kwargs: Any, ) -> List[Tuple[Document, float]]: """Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: query (str): Query to look up. Text or path for image or video. k: Number of Documents to return. Defaults to 4. fetch_k: Number of Documents to fetch to pass to MMR algorithm. lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. 
            filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.

        Returns:
            List of Documents selected by maximal marginal relevance.
        """
        if self.embedding is None:
            raise ValueError(
                "For MMR search, you must specify an embedding function on creation."
            )

        if not os.path.isfile(query) and hasattr(self.embedding, "embed_query"):
            embedding = self._embed_query(query)
        elif os.path.isfile(query) and hasattr(self.embedding, "embed_image"):
            embedding = self._embed_image(uris=[query])[0]
        elif os.path.isfile(query) and hasattr(self.embedding, "embed_video"):
            embedding = self._embed_video(paths=[query])[0]
        else:
            error_msg = f"Could not generate embedding for query '{query}'. "
            error_msg += "If using path for image or video, verify embedding model "
            error_msg += "has callable functions 'embed_image' or 'embed_video'."
            raise ValueError(error_msg)

        docs = self.max_marginal_relevance_search_with_score_by_vector(
            embedding,
            k,
            fetch_k,
            lambda_mult=lambda_mult,
            filter=filter,
        )
        return docs

    def max_marginal_relevance_search_with_score_by_vector(
        self,
        embedding: List[float],
        k: int = DEFAULT_K,
        fetch_k: int = DEFAULT_FETCH_K,
        lambda_mult: float = 0.5,
        filter: Optional[Dict[str, List]] = None,
        **kwargs: Any,
    ) -> List[Tuple[Document, float]]:
        """Return docs selected using the maximal marginal relevance.
        Maximal marginal relevance optimizes for similarity to query AND
        diversity among selected documents.

        Args:
            embedding: Embedding to look up documents similar to.
            k: Number of Documents to return. Defaults to 4.
            fetch_k: Number of Documents to fetch to pass to MMR algorithm.
            lambda_mult: Number between 0 and 1 that determines the degree
                of diversity among the results with 0 corresponding
                to maximum diversity and 1 to minimum diversity.
                Defaults to 0.5.
            filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.

        Returns:
            List of Documents selected by maximal marginal relevance.
""" results = self.query_collection_embeddings( query_embeddings=[embedding], n_results=fetch_k, filter=filter, include=["metadatas", "documents", "distances", "embeddings"], ) if len(results[0][1]) == 0: # No results returned return [] else: embedding_list = [ list(_bytes2embedding(result)) for result in results[0][1] ] mmr_selected = maximal_marginal_relevance( np.array(embedding, dtype=np.float32), embedding_list, k=k, lambda_mult=lambda_mult, ) candidates = _results_to_docs_and_scores(results) selected_results = [ (r, s) for i, (r, s) in enumerate(candidates) if i in mmr_selected ] return selected_results def query_collection_embeddings( self, query_embeddings: Optional[List[List[float]]] = None, collection_name: Optional[str] = None, n_results: int = DEFAULT_K, fetch_k: int = DEFAULT_FETCH_K, filter: Union[None, Dict[str, Any]] = None, results: Union[None, Dict[str, Any]] = None, normalize_distance: bool = False, **kwargs: Any, ) -> List[Tuple[Dict[str, Any], List]]: all_responses: List[Any] = [] if collection_name is None: collection_name = self._collection_name if query_embeddings is None: return all_responses include = kwargs.get("include", ["metadatas"]) if results is None and "metadatas" in include: results = { "list": self.collection_properties, "blob": "embeddings" in include, } for qemb in query_embeddings: response, response_array = self.get_descriptor_response( "FindDescriptor", collection_name, k_neighbors=n_results, fetch_k=fetch_k, constraints=filter, results=results, normalize_distance=normalize_distance, query_embedding=qemb, ) all_responses.append([response, response_array]) return all_responses def similarity_search( self, query: str, k: int = DEFAULT_K, fetch_k: int = DEFAULT_FETCH_K, filter: Optional[Dict[str, List]] = None, **kwargs: Any, ) -> List[Document]: """Run similarity search with VDMS. Args: query (str): Query to look up. Text or path for image or video. k (int): Number of results to return. Defaults to 3. fetch_k (int): Number of candidates to fetch for knn (>= k). filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. Returns: List[Document]: List of documents most similar to the query text. """ docs_and_scores = self.similarity_search_with_score( query, k=k, fetch_k=fetch_k, filter=filter, **kwargs ) return [doc for doc, _ in docs_and_scores] def similarity_search_by_vector( self, embedding: List[float], k: int = DEFAULT_K, fetch_k: int = DEFAULT_FETCH_K, filter: Optional[Dict[str, List]] = None, **kwargs: Any, ) -> List[Document]: """Return docs most similar to embedding vector. Args: embedding (List[float]): Embedding to look up documents similar to. k (int): Number of Documents to return. Defaults to 3. fetch_k (int): Number of candidates to fetch for knn (>= k). filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. Returns: List of Documents most similar to the query vector. """ results = self.query_collection_embeddings( query_embeddings=[embedding], n_results=k, fetch_k=fetch_k, filter=filter, **kwargs, ) return _results_to_docs(results) def similarity_search_with_score( self, query: str, k: int = DEFAULT_K, fetch_k: int = DEFAULT_FETCH_K, filter: Optional[Dict[str, List]] = None, **kwargs: Any, ) -> List[Tuple[Document, float]]: """Run similarity search with VDMS with distance. Args: query (str): Query to look up. Text or path for image or video. k (int): Number of results to return. Defaults to 3. fetch_k (int): Number of candidates to fetch for knn (>= k). 
            filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.

        Returns:
            List[Tuple[Document, float]]: List of documents most similar to
                the query text and cosine distance in float for each.
                Lower score represents more similarity.
        """
        if self.embedding is None:
            raise ValueError("Must provide embedding function")
        else:
            if not os.path.isfile(query) and hasattr(self.embedding, "embed_query"):
                query_embedding: List[float] = self._embed_query(query)
            elif os.path.isfile(query) and hasattr(self.embedding, "embed_image"):
                query_embedding = self._embed_image(uris=[query])[0]
            elif os.path.isfile(query) and hasattr(self.embedding, "embed_video"):
                query_embedding = self._embed_video(paths=[query])[0]
            else:
                error_msg = f"Could not generate embedding for query '{query}'. "
                error_msg += "If using path for image or video, verify embedding model "
                error_msg += "has callable functions 'embed_image' or 'embed_video'."
                raise ValueError(error_msg)

            results = self.query_collection_embeddings(
                query_embeddings=[query_embedding],
                n_results=k,
                fetch_k=fetch_k,
                filter=filter,
                **kwargs,
            )

        return _results_to_docs_and_scores(results)

    def similarity_search_with_score_by_vector(
        self,
        embedding: List[float],
        k: int = DEFAULT_K,
        fetch_k: int = DEFAULT_FETCH_K,
        filter: Optional[Dict[str, List]] = None,
        **kwargs: Any,
    ) -> List[Tuple[Document, float]]:
        """
        Return docs most similar to embedding vector and similarity score.

        Args:
            embedding (List[float]): Embedding to look up documents similar to.
            k (int): Number of Documents to return. Defaults to 3.
            fetch_k (int): Number of candidates to fetch for knn (>= k).
            filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.

        Returns:
            List[Tuple[Document, float]]: List of documents most similar to
                the query text. Lower score represents more similarity.
        """
        # kwargs["normalize_distance"] = True

        results = self.query_collection_embeddings(
            query_embeddings=[embedding],
            n_results=k,
            fetch_k=fetch_k,
            filter=filter,
            **kwargs,
        )

        return _results_to_docs_and_scores(results)

    def update_document(
        self, collection_name: str, document_id: str, document: Document
    ) -> None:
        """Update a document in the collection.

        Args:
            document_id (str): ID of the document to update.
            document (Document): Document to update.
        """
        return self.update_documents(collection_name, [document_id], [document])

    def update_documents(
        self, collection_name: str, ids: List[str], documents: List[Document]
    ) -> None:
        """Update documents in the collection.

        Args:
            ids (List[str]): List of ids of the documents to update.
            documents (List[Document]): List of documents to update.
""" text = [document.page_content for document in documents] metadata = [ _validate_vdms_properties(document.metadata) for document in documents ] embeddings = self._embed_documents(text) self.__update( collection_name, ids, metadatas=metadata, embeddings=embeddings, documents=text, ) # VDMS UTILITY def _add_descriptor( command_str: str, setname: str, label: Optional[str] = None, ref: Optional[int] = None, props: Optional[dict] = None, link: Optional[dict] = None, k_neighbors: Optional[int] = None, constraints: Optional[dict] = None, results: Optional[dict] = None, ) -> Dict[str, Dict[str, Any]]: entity: Dict[str, Any] = {"set": setname} if "Add" in command_str and label: entity["label"] = label if ref is not None: entity["_ref"] = ref if props not in INVALID_METADATA_VALUE: entity["properties"] = props if "Add" in command_str and link is not None: entity["link"] = link if "Find" in command_str and k_neighbors is not None: entity["k_neighbors"] = int(k_neighbors) if "Find" in command_str and constraints not in INVALID_METADATA_VALUE: entity["constraints"] = constraints if "Find" in command_str and results not in INVALID_METADATA_VALUE: entity["results"] = results query = {command_str: entity} return query def _add_descriptorset( command_str: str, name: str, num_dims: Optional[int] = None, engine: Optional[str] = None, metric: Optional[str] = None, ref: Optional[int] = None, props: Optional[Dict] = None, link: Optional[Dict] = None, storeIndex: bool = False, constraints: Optional[Dict] = None, results: Optional[Dict] = None, ) -> Dict[str, Any]: if command_str == "AddDescriptorSet" and all( var is not None for var in [name, num_dims] ): entity: Dict[str, Any] = { "name": name, "dimensions": num_dims, } if engine is not None: entity["engine"] = engine if metric is not None: entity["metric"] = metric if ref is not None: entity["_ref"] = ref if props not in [None, {}]: entity["properties"] = props if link is not None: entity["link"] = link elif command_str == "FindDescriptorSet": entity = {"set": name} if storeIndex: entity["storeIndex"] = storeIndex if constraints not in [None, {}]: entity["constraints"] = constraints if results is not None: entity["results"] = results else: raise ValueError(f"Unknown command: {command_str}") query = {command_str: entity} return query def _add_entity_with_blob( collection_name: str, all_properties: List ) -> Tuple[Dict[str, Any], bytes]: all_properties_str = ",".join(all_properties) if len(all_properties) > 0 else "" querytype = "AddEntity" entity: Dict[str, Any] = {} entity["class"] = "properties" entity["blob"] = True # New props: Dict[str, Any] = {"name": collection_name} props["type"] = "queryable properties" props["content"] = all_properties_str entity["properties"] = props byte_data = _str2bytes(all_properties_str) query: Dict[str, Any] = {} query[querytype] = entity return query, byte_data def _build_property_query( collection_name: str, command_type: str = "find", all_properties: List = [], ref: Optional[int] = None, ) -> Tuple[Any, Any]: all_queries: List[Any] = [] blob_arr: List[Any] = [] choices = ["find", "add", "update"] if command_type.lower() not in choices: raise ValueError("[!] Invalid type. 
Choices are : {}".format(",".join(choices))) if command_type.lower() == "find": query = _find_property_entity(collection_name, unique_entity=True) all_queries.append(query) elif command_type.lower() == "add": query, byte_data = _add_entity_with_blob(collection_name, all_properties) all_queries.append(query) blob_arr.append(byte_data) elif command_type.lower() == "update": # Find & Delete query = _find_property_entity(collection_name, deletion=True) all_queries.append(query) # Add query, byte_data = _add_entity_with_blob(collection_name, all_properties) all_queries.append(query) blob_arr.append(byte_data) return all_queries, blob_arr def _bytes2embedding(blob: bytes) -> Any: emb = np.frombuffer(blob, dtype="float32") return emb def _bytes2str(in_bytes: bytes) -> str: return in_bytes.decode() def _get_cmds_from_query(all_queries: list) -> List[str]: return list(set([k for q in all_queries for k in q.keys()])) def _check_valid_response(all_queries: List[dict], response: Any) -> bool: cmd_list = _get_cmds_from_query(all_queries) valid_res = isinstance(response, list) and any( cmd in response[0] and "returned" in response[0][cmd] and response[0][cmd]["returned"] > 0 for cmd in cmd_list ) return valid_res def _check_descriptor_exists_by_id( client: vdms.vdms, setname: str, id: str, ) -> Tuple[bool, Any]: constraints = {"id": ["==", id]} findDescriptor = _add_descriptor( "FindDescriptor", setname, constraints=constraints, results={"list": ["id"], "count": ""}, ) all_queries = [findDescriptor] res, _ = client.query(all_queries) valid_res = _check_valid_response(all_queries, res) return valid_res, findDescriptor def embedding2bytes(embedding: Union[List[float], None]) -> Union[bytes, None]: """Convert embedding to bytes.""" blob = None if embedding is not None: emb = np.array(embedding, dtype="float32") blob = emb.tobytes() return blob def _find_property_entity( collection_name: str, unique_entity: Optional[bool] = False, deletion: Optional[bool] = False, ) -> Dict[str, Dict[str, Any]]: querytype = "FindEntity" entity: Dict[str, Any] = {} entity["class"] = "properties" if unique_entity: entity["unique"] = unique_entity results: Dict[str, Any] = {} results["blob"] = True results["count"] = "" results["list"] = ["content"] entity["results"] = results constraints: Dict[str, Any] = {} if deletion: constraints["_deletion"] = ["==", 1] constraints["name"] = ["==", collection_name] entity["constraints"] = constraints query: Dict[str, Any] = {} query[querytype] = entity return query def _str2bytes(in_str: str) -> bytes: return str.encode(in_str) def _validate_vdms_properties(metadata: Dict[str, Any]) -> Dict: new_metadata: Dict[str, Any] = {} for key, value in metadata.items(): if not isinstance(value, list): new_metadata[str(key)] = value return new_metadata
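

# Illustrative sketch (not part of the library's public API): the helpers
# ``embedding2bytes`` and ``_bytes2embedding`` above serialize embeddings as
# raw float32 blobs for VDMS. The hypothetical function below demonstrates
# that round trip locally, with no VDMS server involved.
def _example_embedding_roundtrip() -> None:
    embedding = [0.1, 0.2, 0.3]
    blob = embedding2bytes(embedding)  # list of floats -> float32 bytes
    assert blob is not None
    restored = _bytes2embedding(blob)  # float32 bytes -> numpy array
    # The round trip preserves values up to float32 precision.
    assert np.allclose(restored, np.array(embedding, dtype=np.float32))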
0
lc_public_repos/langchain/libs/community/langchain_community
lc_public_repos/langchain/libs/community/langchain_community/vectorstores/inmemory.py
from langchain_core.vectorstores import InMemoryVectorStore __all__ = [ "InMemoryVectorStore", ]
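

# Illustrative usage sketch, kept as a comment so this module remains a pure
# re-export. It assumes langchain-core's InMemoryVectorStore API (a constructor
# taking an `embedding`, plus `add_texts` / `similarity_search`), which holds
# for recent langchain-core releases:
#
#     from langchain_core.embeddings import DeterministicFakeEmbedding
#     from langchain_community.vectorstores import InMemoryVectorStore
#
#     store = InMemoryVectorStore(embedding=DeterministicFakeEmbedding(size=16))
#     store.add_texts(["hello world", "goodbye world"])
#     print(store.similarity_search("hello", k=1))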
0
lc_public_repos/langchain/libs/community/langchain_community
lc_public_repos/langchain/libs/community/langchain_community/vectorstores/llm_rails.py
"""Wrapper around LLMRails vector database.""" from __future__ import annotations import json import logging import os import uuid from typing import Any, Iterable, List, Optional, Tuple import requests from langchain_core.documents import Document from langchain_core.embeddings import Embeddings from langchain_core.vectorstores import VectorStore, VectorStoreRetriever from pydantic import Field class LLMRails(VectorStore): """Implementation of Vector Store using LLMRails. See https://llmrails.com/ Example: .. code-block:: python from langchain_community.vectorstores import LLMRails vectorstore = LLMRails( api_key=llm_rails_api_key, datastore_id=datastore_id ) """ def __init__( self, datastore_id: Optional[str] = None, api_key: Optional[str] = None, ): """Initialize with LLMRails API.""" self._datastore_id = datastore_id or os.environ.get("LLM_RAILS_DATASTORE_ID") self._api_key = api_key or os.environ.get("LLM_RAILS_API_KEY") if self._api_key is None: logging.warning("Can't find Rails credentials in environment.") self._session = requests.Session() # to reuse connections self.datastore_id = datastore_id self.base_url = "https://api.llmrails.com/v1" def _get_post_headers(self) -> dict: """Returns headers that should be attached to each post request.""" return {"X-API-KEY": self._api_key} def add_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, **kwargs: Any, ) -> List[str]: """Run more texts through the embeddings and add to the vectorstore. Args: texts: Iterable of strings to add to the vectorstore. Returns: List of ids from adding the texts into the vectorstore. """ names: List[str] = [] for text in texts: doc_name = str(uuid.uuid4()) response = self._session.post( f"{self.base_url}/datastores/{self._datastore_id}/text", json={"name": doc_name, "text": text}, verify=True, headers=self._get_post_headers(), ) if response.status_code != 200: logging.error( f"Create request failed for doc_name = {doc_name} with status code " f"{response.status_code}, reason {response.reason}, text " f"{response.text}" ) return names names.append(doc_name) return names def add_files( self, files_list: Iterable[str], metadatas: Optional[List[dict]] = None, **kwargs: Any, ) -> bool: """ LLMRails provides a way to add documents directly via our API where pre-processing and chunking occurs internally in an optimal way This method provides a way to use that API in LangChain Args: files_list: Iterable of strings, each representing a local file path. Files could be text, HTML, PDF, markdown, doc/docx, ppt/pptx, etc. see API docs for full list Returns: List of ids associated with each of the files indexed """ files = [] for file in files_list: if not os.path.exists(file): logging.error(f"File {file} does not exist, skipping") continue files.append(("file", (os.path.basename(file), open(file, "rb")))) response = self._session.post( f"{self.base_url}/datastores/{self._datastore_id}/file", files=files, verify=True, headers=self._get_post_headers(), ) if response.status_code != 200: logging.error( f"Create request failed for datastore = {self._datastore_id} " f"with status code {response.status_code}, reason {response.reason}, " f"text {response.text}" ) return False return True def similarity_search_with_score( self, query: str, k: int = 5 ) -> List[Tuple[Document, float]]: """Return LLMRails documents most similar to query, along with scores. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 5 Max 10. alpha: parameter for hybrid search . 
        Returns:
            List of Documents most similar to the query and score for each.
        """
        response = self._session.post(
            headers=self._get_post_headers(),
            url=f"{self.base_url}/datastores/{self._datastore_id}/search",
            data=json.dumps({"k": k, "text": query}),
            timeout=10,
        )

        if response.status_code != 200:
            logging.error(
                "Query failed %s",
                f"(code {response.status_code}, reason {response.reason}, details "
                f"{response.text})",
            )

            return []

        results = response.json()["results"]
        docs = [
            (
                Document(
                    page_content=x["text"],
                    metadata={
                        key: value
                        for key, value in x["metadata"].items()
                        if key != "score"
                    },
                ),
                x["metadata"]["score"],
            )
            for x in results
        ]

        return docs

    def similarity_search(
        self, query: str, k: int = 4, **kwargs: Any
    ) -> List[Document]:
        """Return LLMRails documents most similar to query.

        Args:
            query: Text to look up documents similar to.
            k: Number of Documents to return. Defaults to 4.

        Returns:
            List of Documents most similar to the query.
        """
        docs_and_scores = self.similarity_search_with_score(query, k=k)

        return [doc for doc, _ in docs_and_scores]

    @classmethod
    def from_texts(
        cls,
        texts: List[str],
        embedding: Optional[Embeddings] = None,
        metadatas: Optional[List[dict]] = None,
        **kwargs: Any,
    ) -> LLMRails:
        """Construct LLMRails wrapper from raw documents.

        This is intended to be a quick way to get started.

        Example:
            .. code-block:: python

                from langchain_community.vectorstores import LLMRails

                llm_rails = LLMRails.from_texts(
                    texts,
                    datastore_id=datastore_id,
                    api_key=llm_rails_api_key
                )
        """
        # Note: LLMRails generates its own embeddings, so we ignore the provided
        # embeddings (required by interface)
        llm_rails = cls(**kwargs)
        llm_rails.add_texts(texts)
        return llm_rails

    def as_retriever(self, **kwargs: Any) -> LLMRailsRetriever:
        return LLMRailsRetriever(vectorstore=self, **kwargs)


class LLMRailsRetriever(VectorStoreRetriever):  # type: ignore[override]
    """Retriever for LLMRails."""

    vectorstore: LLMRails
    search_kwargs: dict = Field(default_factory=lambda: {"k": 5})
    """Search params.
        k: Number of Documents to return. Defaults to 5.
        alpha: parameter for hybrid search.
    """

    def add_texts(self, texts: List[str]) -> None:
        """Add texts to the datastore.

        Args:
            texts (List[str]): The texts to add.
        """
        self.vectorstore.add_texts(texts)
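

# Illustrative usage sketch (hypothetical helper, not part of the public API).
# It assumes LLM_RAILS_API_KEY and LLM_RAILS_DATASTORE_ID are set in the
# environment; every call below performs a real HTTP request against LLMRails.
def _example_llmrails_search() -> None:
    vectorstore = LLMRails()
    # Text is chunked and embedded server-side by LLMRails.
    vectorstore.add_texts(["LLMRails chunks and embeds text server-side."])
    retriever = vectorstore.as_retriever(search_kwargs={"k": 3})
    print(retriever.invoke("How does LLMRails handle embeddings?"))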
0
lc_public_repos/langchain/libs/community/langchain_community
lc_public_repos/langchain/libs/community/langchain_community/vectorstores/pinecone.py
from __future__ import annotations import logging import os import uuid import warnings from typing import TYPE_CHECKING, Any, Callable, Iterable, List, Optional, Tuple, Union import numpy as np from langchain_core._api.deprecation import deprecated from langchain_core.documents import Document from langchain_core.embeddings import Embeddings from langchain_core.utils.iter import batch_iterate from langchain_core.vectorstores import VectorStore from packaging import version from langchain_community.vectorstores.utils import ( DistanceStrategy, maximal_marginal_relevance, ) if TYPE_CHECKING: from pinecone import Index logger = logging.getLogger(__name__) def _import_pinecone() -> Any: try: import pinecone except ImportError as e: raise ImportError( "Could not import pinecone python package. " "Please install it with `pip3 install pinecone`." ) from e return pinecone def _is_pinecone_v3() -> bool: pinecone = _import_pinecone() pinecone_client_version = pinecone.__version__ return version.parse(pinecone_client_version) >= version.parse("3.0.0.dev") @deprecated( since="0.0.18", removal="1.0", alternative_import="langchain_pinecone.Pinecone" ) class Pinecone(VectorStore): """`Pinecone` vector store. To use, you should have the ``pinecone`` python package installed. This version of Pinecone is deprecated. Please use `langchain_pinecone.Pinecone` instead. """ def __init__( self, index: Any, embedding: Union[Embeddings, Callable], text_key: str, namespace: Optional[str] = None, distance_strategy: Optional[DistanceStrategy] = DistanceStrategy.COSINE, ): """Initialize with Pinecone client.""" pinecone = _import_pinecone() if not isinstance(embedding, Embeddings): warnings.warn( "Passing in `embedding` as a Callable is deprecated. Please pass in an" " Embeddings object instead." ) if not isinstance(index, pinecone.Index): raise ValueError( f"client should be an instance of pinecone.Index, " f"got {type(index)}" ) self._index = index self._embedding = embedding self._text_key = text_key self._namespace = namespace self.distance_strategy = distance_strategy @property def embeddings(self) -> Optional[Embeddings]: """Access the query embedding object if available.""" if isinstance(self._embedding, Embeddings): return self._embedding return None def _embed_documents(self, texts: Iterable[str]) -> List[List[float]]: """Embed search docs.""" if isinstance(self._embedding, Embeddings): return self._embedding.embed_documents(list(texts)) return [self._embedding(t) for t in texts] def _embed_query(self, text: str) -> List[float]: """Embed query text.""" if isinstance(self._embedding, Embeddings): return self._embedding.embed_query(text) return self._embedding(text) def add_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, namespace: Optional[str] = None, batch_size: int = 32, embedding_chunk_size: int = 1000, **kwargs: Any, ) -> List[str]: """Run more texts through the embeddings and add to the vectorstore. Upsert optimization is done by chunking the embeddings and upserting them. This is done to avoid memory issues and optimize using HTTP based embeddings. For OpenAI embeddings, use pool_threads>4 when constructing the pinecone.Index, embedding_chunk_size>1000 and batch_size~64 for best performance. Args: texts: Iterable of strings to add to the vectorstore. metadatas: Optional list of metadatas associated with the texts. ids: Optional list of ids to associate with the texts. namespace: Optional pinecone namespace to add the texts to. 
            batch_size: Batch size to use when adding the texts to the vectorstore.
            embedding_chunk_size: Chunk size to use when embedding the texts.

        Returns:
            List of ids from adding the texts into the vectorstore.

        """
        if namespace is None:
            namespace = self._namespace

        texts = list(texts)
        ids = ids or [str(uuid.uuid4()) for _ in texts]
        metadatas = metadatas or [{} for _ in texts]
        for metadata, text in zip(metadatas, texts):
            metadata[self._text_key] = text

        # For loops to avoid memory issues and optimize when using HTTP based embeddings
        # The first loop runs the embeddings, it benefits when using OpenAI embeddings
        # The second loop runs the pinecone upsert asynchronously.
        for i in range(0, len(texts), embedding_chunk_size):
            chunk_texts = texts[i : i + embedding_chunk_size]
            chunk_ids = ids[i : i + embedding_chunk_size]
            chunk_metadatas = metadatas[i : i + embedding_chunk_size]
            embeddings = self._embed_documents(chunk_texts)
            async_res = [
                self._index.upsert(
                    vectors=batch,
                    namespace=namespace,
                    async_req=True,
                    **kwargs,
                )
                for batch in batch_iterate(
                    batch_size, zip(chunk_ids, embeddings, chunk_metadatas)
                )
            ]
            [res.get() for res in async_res]

        return ids

    def similarity_search_with_score(
        self,
        query: str,
        k: int = 4,
        filter: Optional[dict] = None,
        namespace: Optional[str] = None,
    ) -> List[Tuple[Document, float]]:
        """Return pinecone documents most similar to query, along with scores.

        Args:
            query: Text to look up documents similar to.
            k: Number of Documents to return. Defaults to 4.
            filter: Dictionary of argument(s) to filter on metadata
            namespace: Namespace to search in. Default will search in '' namespace.

        Returns:
            List of Documents most similar to the query and score for each
        """
        return self.similarity_search_by_vector_with_score(
            self._embed_query(query), k=k, filter=filter, namespace=namespace
        )

    def similarity_search_by_vector_with_score(
        self,
        embedding: List[float],
        *,
        k: int = 4,
        filter: Optional[dict] = None,
        namespace: Optional[str] = None,
    ) -> List[Tuple[Document, float]]:
        """Return pinecone documents most similar to embedding, along with scores."""

        if namespace is None:
            namespace = self._namespace
        docs = []
        results = self._index.query(
            vector=[embedding],
            top_k=k,
            include_metadata=True,
            namespace=namespace,
            filter=filter,
        )
        for res in results["matches"]:
            metadata = res["metadata"]
            if self._text_key in metadata:
                text = metadata.pop(self._text_key)
                score = res["score"]
                docs.append((Document(page_content=text, metadata=metadata), score))
            else:
                logger.warning(
                    f"Found document with no `{self._text_key}` key. Skipping."
                )
        return docs

    def similarity_search(
        self,
        query: str,
        k: int = 4,
        filter: Optional[dict] = None,
        namespace: Optional[str] = None,
        **kwargs: Any,
    ) -> List[Document]:
        """Return pinecone documents most similar to query.

        Args:
            query: Text to look up documents similar to.
            k: Number of Documents to return. Defaults to 4.
            filter: Dictionary of argument(s) to filter on metadata
            namespace: Namespace to search in. Default will search in '' namespace.

        Returns:
            List of Documents most similar to the query
        """
        docs_and_scores = self.similarity_search_with_score(
            query, k=k, filter=filter, namespace=namespace, **kwargs
        )
        return [doc for doc, _ in docs_and_scores]

    def _select_relevance_score_fn(self) -> Callable[[float], float]:
        """
        The 'correct' relevance function
        may differ depending on a few things, including:
        - the distance / similarity metric used by the VectorStore
        - the scale of your embeddings (OpenAI's are unit normed. Many others are not!)
        - embedding dimensionality
        - etc.
""" if self.distance_strategy == DistanceStrategy.COSINE: return self._cosine_relevance_score_fn elif self.distance_strategy == DistanceStrategy.MAX_INNER_PRODUCT: return self._max_inner_product_relevance_score_fn elif self.distance_strategy == DistanceStrategy.EUCLIDEAN_DISTANCE: return self._euclidean_relevance_score_fn else: raise ValueError( "Unknown distance strategy, must be cosine, max_inner_product " "(dot product), or euclidean" ) @staticmethod def _cosine_relevance_score_fn(score: float) -> float: """Pinecone returns cosine similarity scores between [-1,1]""" return (score + 1) / 2 def max_marginal_relevance_search_by_vector( self, embedding: List[float], k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, filter: Optional[dict] = None, namespace: Optional[str] = None, **kwargs: Any, ) -> List[Document]: """Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. fetch_k: Number of Documents to fetch to pass to MMR algorithm. lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. Returns: List of Documents selected by maximal marginal relevance. """ if namespace is None: namespace = self._namespace results = self._index.query( vector=[embedding], top_k=fetch_k, include_values=True, include_metadata=True, namespace=namespace, filter=filter, ) mmr_selected = maximal_marginal_relevance( np.array([embedding], dtype=np.float32), [item["values"] for item in results["matches"]], k=k, lambda_mult=lambda_mult, ) selected = [results["matches"][i]["metadata"] for i in mmr_selected] return [ Document(page_content=metadata.pop((self._text_key)), metadata=metadata) for metadata in selected ] def max_marginal_relevance_search( self, query: str, k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, filter: Optional[dict] = None, namespace: Optional[str] = None, **kwargs: Any, ) -> List[Document]: """Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. fetch_k: Number of Documents to fetch to pass to MMR algorithm. lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. Returns: List of Documents selected by maximal marginal relevance. """ embedding = self._embed_query(query) return self.max_marginal_relevance_search_by_vector( embedding, k, fetch_k, lambda_mult, filter, namespace ) @classmethod def get_pinecone_index( cls, index_name: Optional[str], pool_threads: int = 4, ) -> Index: """Return a Pinecone Index instance. Args: index_name: Name of the index to use. pool_threads: Number of threads to use for index upsert. 
Returns: Pinecone Index instance.""" pinecone = _import_pinecone() if _is_pinecone_v3(): pinecone_instance = pinecone.Pinecone( api_key=os.environ.get("PINECONE_API_KEY"), pool_threads=pool_threads ) indexes = pinecone_instance.list_indexes() index_names = [i.name for i in indexes.index_list["indexes"]] else: index_names = pinecone.list_indexes() if index_name in index_names: index = ( pinecone_instance.Index(index_name) if _is_pinecone_v3() else pinecone.Index(index_name, pool_threads=pool_threads) ) elif len(index_names) == 0: raise ValueError( "No active indexes found in your Pinecone project, " "are you sure you're using the right Pinecone API key and Environment? " "Please double check your Pinecone dashboard." ) else: raise ValueError( f"Index '{index_name}' not found in your Pinecone project. " f"Did you mean one of the following indexes: {', '.join(index_names)}" ) return index @classmethod def from_texts( cls, texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, batch_size: int = 32, text_key: str = "text", namespace: Optional[str] = None, index_name: Optional[str] = None, upsert_kwargs: Optional[dict] = None, pool_threads: int = 4, embeddings_chunk_size: int = 1000, **kwargs: Any, ) -> Pinecone: """ DEPRECATED: use langchain_pinecone.PineconeVectorStore.from_texts instead: Construct Pinecone wrapper from raw documents. This is a user friendly interface that: 1. Embeds documents. 2. Adds the documents to a provided Pinecone index This is intended to be a quick way to get started. The `pool_threads` affects the speed of the upsert operations. Example: .. code-block:: python from langchain_pinecone import PineconeVectorStore from langchain_openai import OpenAIEmbeddings embeddings = OpenAIEmbeddings() index_name = "my-index" namespace = "my-namespace" vectorstore = Pinecone( index_name=index_name, embedding=embedding, namespace=namespace, ) """ pinecone_index = cls.get_pinecone_index(index_name, pool_threads) pinecone = cls(pinecone_index, embedding, text_key, namespace, **kwargs) pinecone.add_texts( texts, metadatas=metadatas, ids=ids, namespace=namespace, batch_size=batch_size, embedding_chunk_size=embeddings_chunk_size, **(upsert_kwargs or {}), ) return pinecone @classmethod def from_existing_index( cls, index_name: str, embedding: Embeddings, text_key: str = "text", namespace: Optional[str] = None, pool_threads: int = 4, ) -> Pinecone: """Load pinecone vectorstore from index name.""" pinecone_index = cls.get_pinecone_index(index_name, pool_threads) return cls(pinecone_index, embedding, text_key, namespace) def delete( self, ids: Optional[List[str]] = None, delete_all: Optional[bool] = None, namespace: Optional[str] = None, filter: Optional[dict] = None, **kwargs: Any, ) -> None: """Delete by vector IDs or filter. Args: ids: List of ids to delete. filter: Dictionary of conditions to filter vectors to delete. """ if namespace is None: namespace = self._namespace if delete_all: self._index.delete(delete_all=True, namespace=namespace, **kwargs) elif ids is not None: chunk_size = 1000 for i in range(0, len(ids), chunk_size): chunk = ids[i : i + chunk_size] self._index.delete(ids=chunk, namespace=namespace, **kwargs) elif filter is not None: self._index.delete(filter=filter, namespace=namespace, **kwargs) else: raise ValueError("Either ids, delete_all, or filter must be provided.") return None
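

# Illustrative usage sketch (hypothetical helper, not part of the public API).
# It assumes an index named "my-index" already exists and PINECONE_API_KEY is
# set; FakeEmbeddings stands in for a real embedding model, so the dimension
# (1536) must match the index. New code should prefer
# langchain_pinecone.PineconeVectorStore, as the deprecation note above says.
def _example_query_existing_index() -> None:
    from langchain_community.embeddings import FakeEmbeddings

    store = Pinecone.from_existing_index(
        index_name="my-index",  # assumed to exist in the Pinecone project
        embedding=FakeEmbeddings(size=1536),
    )
    print(store.similarity_search("example query", k=2))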
0
lc_public_repos/langchain/libs/community/langchain_community
lc_public_repos/langchain/libs/community/langchain_community/vectorstores/meilisearch.py
from __future__ import annotations import uuid from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Tuple, Type from langchain_core.documents import Document from langchain_core.embeddings import Embeddings from langchain_core.utils import get_from_env from langchain_core.vectorstores import VectorStore if TYPE_CHECKING: from meilisearch import Client def _create_client( client: Optional[Client] = None, url: Optional[str] = None, api_key: Optional[str] = None, ) -> Client: try: import meilisearch except ImportError: raise ImportError( "Could not import meilisearch python package. " "Please install it with `pip install meilisearch`." ) if not client: url = url or get_from_env("url", "MEILI_HTTP_ADDR") try: api_key = api_key or get_from_env("api_key", "MEILI_MASTER_KEY") except Exception: pass client = meilisearch.Client(url=url, api_key=api_key) elif not isinstance(client, meilisearch.Client): raise ValueError( f"client should be an instance of meilisearch.Client, " f"got {type(client)}" ) try: client.version() except ValueError as e: raise ValueError(f"Failed to connect to Meilisearch: {e}") return client class Meilisearch(VectorStore): """`Meilisearch` vector store. To use this, you need to have `meilisearch` python package installed, and a running Meilisearch instance. To learn more about Meilisearch Python, refer to the in-depth Meilisearch Python documentation: https://meilisearch.github.io/meilisearch-python/. See the following documentation for how to run a Meilisearch instance: https://www.meilisearch.com/docs/learn/getting_started/quick_start. Example: .. code-block:: python from langchain_community.vectorstores import Meilisearch from langchain_community.embeddings.openai import OpenAIEmbeddings import meilisearch # api_key is optional; provide it if your meilisearch instance requires it client = meilisearch.Client(url='http://127.0.0.1:7700', api_key='***') embeddings = OpenAIEmbeddings() embedders = { "theEmbedderName": { "source": "userProvided", "dimensions": "1536" } } vectorstore = Meilisearch( embedding=embeddings, embedders=embedders, client=client, index_name='langchain_demo', text_key='text') """ def __init__( self, embedding: Embeddings, client: Optional[Client] = None, url: Optional[str] = None, api_key: Optional[str] = None, index_name: str = "langchain-demo", text_key: str = "text", metadata_key: str = "metadata", *, embedders: Optional[Dict[str, Any]] = None, ): """Initialize with Meilisearch client.""" client = _create_client(client=client, url=url, api_key=api_key) self._client = client self._index_name = index_name self._embedding = embedding self._text_key = text_key self._metadata_key = metadata_key self._embedders = embedders self._embedders_settings = self._client.index( str(self._index_name) ).update_embedders(embedders) def add_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, embedder_name: Optional[str] = "default", **kwargs: Any, ) -> List[str]: """Run more texts through the embedding and add them to the vector store. Args: texts (Iterable[str]): Iterable of strings/text to add to the vectorstore. embedder_name: Name of the embedder. Defaults to "default". metadatas (Optional[List[dict]]): Optional list of metadata. Defaults to None. ids Optional[List[str]]: Optional list of IDs. Defaults to None. Returns: List[str]: List of IDs of the texts added to the vectorstore. 
""" texts = list(texts) # Embed and create the documents docs = [] if ids is None: ids = [uuid.uuid4().hex for _ in texts] if metadatas is None: metadatas = [{} for _ in texts] embedding_vectors = self._embedding.embed_documents(texts) for i, text in enumerate(texts): id = ids[i] metadata = metadatas[i] metadata[self._text_key] = text embedding = embedding_vectors[i] docs.append( { "id": id, "_vectors": {f"{embedder_name}": embedding}, f"{self._metadata_key}": metadata, } ) # Send to Meilisearch self._client.index(str(self._index_name)).add_documents(docs) return ids def similarity_search( self, query: str, k: int = 4, filter: Optional[Dict[str, str]] = None, embedder_name: Optional[str] = "default", **kwargs: Any, ) -> List[Document]: """Return meilisearch documents most similar to the query. Args: query (str): Query text for which to find similar documents. embedder_name: Name of the embedder to be used. Defaults to "default". k (int): Number of documents to return. Defaults to 4. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. Returns: List[Document]: List of Documents most similar to the query text and score for each. """ docs_and_scores = self.similarity_search_with_score( query=query, embedder_name=embedder_name, k=k, filter=filter, kwargs=kwargs, ) return [doc for doc, _ in docs_and_scores] def similarity_search_with_score( self, query: str, k: int = 4, filter: Optional[Dict[str, str]] = None, embedder_name: Optional[str] = "default", **kwargs: Any, ) -> List[Tuple[Document, float]]: """Return meilisearch documents most similar to the query, along with scores. Args: query (str): Query text for which to find similar documents. embedder_name: Name of the embedder to be used. Defaults to "default". k (int): Number of documents to return. Defaults to 4. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. Returns: List[Document]: List of Documents most similar to the query text and score for each. """ _query = self._embedding.embed_query(query) docs = self.similarity_search_by_vector_with_scores( embedding=_query, embedder_name=embedder_name, k=k, filter=filter, kwargs=kwargs, ) return docs def similarity_search_by_vector_with_scores( self, embedding: List[float], embedder_name: Optional[str] = "default", k: int = 4, filter: Optional[Dict[str, Any]] = None, **kwargs: Any, ) -> List[Tuple[Document, float]]: """Return meilisearch documents most similar to embedding vector. Args: embedding (List[float]): Embedding to look up similar documents. embedder_name: Name of the embedder to be used. Defaults to "default". k (int): Number of documents to return. Defaults to 4. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. Returns: List[Document]: List of Documents most similar to the query vector and score for each. 
""" docs = [] results = self._client.index(str(self._index_name)).search( "", { "vector": embedding, "hybrid": {"semanticRatio": 1.0, "embedder": embedder_name}, "limit": k, "filter": filter, "showRankingScore": True, }, ) for result in results["hits"]: metadata = result[self._metadata_key] if self._text_key in metadata: text = metadata.pop(self._text_key) semantic_score = result["_rankingScore"] docs.append( (Document(page_content=text, metadata=metadata), semantic_score) ) return docs def similarity_search_by_vector( self, embedding: List[float], k: int = 4, filter: Optional[Dict[str, str]] = None, embedder_name: Optional[str] = "default", **kwargs: Any, ) -> List[Document]: """Return meilisearch documents most similar to embedding vector. Args: embedding (List[float]): Embedding to look up similar documents. embedder_name: Name of the embedder to be used. Defaults to "default". k (int): Number of documents to return. Defaults to 4. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. Returns: List[Document]: List of Documents most similar to the query vector and score for each. """ docs = self.similarity_search_by_vector_with_scores( embedding=embedding, embedder_name=embedder_name, k=k, filter=filter, kwargs=kwargs, ) return [doc for doc, _ in docs] @classmethod def from_texts( cls: Type[Meilisearch], texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, client: Optional[Client] = None, url: Optional[str] = None, api_key: Optional[str] = None, index_name: str = "langchain-demo", ids: Optional[List[str]] = None, text_key: Optional[str] = "text", metadata_key: Optional[str] = "metadata", embedders: Dict[str, Any] = {}, embedder_name: Optional[str] = "default", **kwargs: Any, ) -> Meilisearch: """Construct Meilisearch wrapper from raw documents. This is a user-friendly interface that: 1. Embeds documents. 2. Adds the documents to a provided Meilisearch index. This is intended to be a quick way to get started. Example: .. code-block:: python from langchain_community.vectorstores import Meilisearch from langchain_community.embeddings import OpenAIEmbeddings import meilisearch # The environment should be the one specified next to the API key # in your Meilisearch console client = meilisearch.Client(url='http://127.0.0.1:7700', api_key='***') embedding = OpenAIEmbeddings() embedders: Embedders index setting. embedder_name: Name of the embedder. Defaults to "default". docsearch = Meilisearch.from_texts( client=client, embedding=embedding, ) """ client = _create_client(client=client, url=url, api_key=api_key) vectorstore = cls( embedding=embedding, embedders=embedders, client=client, index_name=index_name, ) vectorstore.add_texts( texts=texts, embedder_name=embedder_name, metadatas=metadatas, ids=ids, text_key=text_key, metadata_key=metadata_key, ) return vectorstore
0
lc_public_repos/langchain/libs/community/langchain_community
lc_public_repos/langchain/libs/community/langchain_community/vectorstores/timescalevector.py
"""VectorStore wrapper around a Postgres-TimescaleVector database.""" from __future__ import annotations import enum import logging import uuid from datetime import timedelta from typing import ( TYPE_CHECKING, Any, Callable, Dict, Iterable, List, Optional, Tuple, Type, Union, ) from langchain_core.documents import Document from langchain_core.embeddings import Embeddings from langchain_core.utils import get_from_dict_or_env from langchain_core.vectorstores import VectorStore from langchain_community.vectorstores.utils import DistanceStrategy if TYPE_CHECKING: from timescale_vector import Predicates DEFAULT_DISTANCE_STRATEGY = DistanceStrategy.COSINE ADA_TOKEN_COUNT = 1536 _LANGCHAIN_DEFAULT_COLLECTION_NAME = "langchain_store" class TimescaleVector(VectorStore): """Timescale Postgres vector store To use, you should have the ``timescale_vector`` python package installed. Args: service_url: Service url on timescale cloud. embedding: Any embedding function implementing `langchain.embeddings.base.Embeddings` interface. collection_name: The name of the collection to use. (default: langchain_store) This will become the table name used for the collection. distance_strategy: The distance strategy to use. (default: COSINE) pre_delete_collection: If True, will delete the collection if it exists. (default: False). Useful for testing. Example: .. code-block:: python from langchain_community.vectorstores import TimescaleVector from langchain_community.embeddings.openai import OpenAIEmbeddings SERVICE_URL = "postgres://tsdbadmin:<password>@<id>.tsdb.cloud.timescale.com:<port>/tsdb?sslmode=require" COLLECTION_NAME = "state_of_the_union_test" embeddings = OpenAIEmbeddings() vectorestore = TimescaleVector.from_documents( embedding=embeddings, documents=docs, collection_name=COLLECTION_NAME, service_url=SERVICE_URL, ) """ def __init__( self, service_url: str, embedding: Embeddings, collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME, num_dimensions: int = ADA_TOKEN_COUNT, distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY, pre_delete_collection: bool = False, logger: Optional[logging.Logger] = None, relevance_score_fn: Optional[Callable[[float], float]] = None, time_partition_interval: Optional[timedelta] = None, **kwargs: Any, ) -> None: try: from timescale_vector import client except ImportError: raise ImportError( "Could not import timescale_vector python package. " "Please install it with `pip install timescale-vector`." ) self.service_url = service_url self.embedding = embedding self.collection_name = collection_name self.num_dimensions = num_dimensions self._distance_strategy = distance_strategy self.pre_delete_collection = pre_delete_collection self.logger = logger or logging.getLogger(__name__) self.override_relevance_score_fn = relevance_score_fn self._time_partition_interval = time_partition_interval self.sync_client = client.Sync( self.service_url, self.collection_name, self.num_dimensions, self._distance_strategy.value.lower(), time_partition_interval=self._time_partition_interval, **kwargs, ) self.async_client = client.Async( self.service_url, self.collection_name, self.num_dimensions, self._distance_strategy.value.lower(), time_partition_interval=self._time_partition_interval, **kwargs, ) self.__post_init__() def __post_init__( self, ) -> None: """ Initialize the store. 
""" self.sync_client.create_tables() if self.pre_delete_collection: self.sync_client.delete_all() @property def embeddings(self) -> Embeddings: return self.embedding def drop_tables(self) -> None: self.sync_client.drop_table() @classmethod def __from( cls, texts: List[str], embeddings: List[List[float]], embedding: Embeddings, metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME, distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY, service_url: Optional[str] = None, pre_delete_collection: bool = False, **kwargs: Any, ) -> TimescaleVector: num_dimensions = len(embeddings[0]) if ids is None: ids = [str(uuid.uuid4()) for _ in texts] if not metadatas: metadatas = [{} for _ in texts] if service_url is None: service_url = cls.get_service_url(kwargs) store = cls( service_url=service_url, num_dimensions=num_dimensions, collection_name=collection_name, embedding=embedding, distance_strategy=distance_strategy, pre_delete_collection=pre_delete_collection, **kwargs, ) store.add_embeddings( texts=texts, embeddings=embeddings, metadatas=metadatas, ids=ids, **kwargs ) return store @classmethod async def __afrom( cls, texts: List[str], embeddings: List[List[float]], embedding: Embeddings, metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME, distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY, service_url: Optional[str] = None, pre_delete_collection: bool = False, **kwargs: Any, ) -> TimescaleVector: num_dimensions = len(embeddings[0]) if ids is None: ids = [str(uuid.uuid4()) for _ in texts] if not metadatas: metadatas = [{} for _ in texts] if service_url is None: service_url = cls.get_service_url(kwargs) store = cls( service_url=service_url, num_dimensions=num_dimensions, collection_name=collection_name, embedding=embedding, distance_strategy=distance_strategy, pre_delete_collection=pre_delete_collection, **kwargs, ) await store.aadd_embeddings( texts=texts, embeddings=embeddings, metadatas=metadatas, ids=ids, **kwargs ) return store def add_embeddings( self, texts: Iterable[str], embeddings: List[List[float]], metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, **kwargs: Any, ) -> List[str]: """Add embeddings to the vectorstore. Args: texts: Iterable of strings to add to the vectorstore. embeddings: List of list of embedding vectors. metadatas: List of metadatas associated with the texts. kwargs: vectorstore specific parameters """ if ids is None: ids = [str(uuid.uuid4()) for _ in texts] if not metadatas: metadatas = [{} for _ in texts] records = list(zip(ids, metadatas, texts, embeddings)) self.sync_client.upsert(records) return ids async def aadd_embeddings( self, texts: Iterable[str], embeddings: List[List[float]], metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, **kwargs: Any, ) -> List[str]: """Add embeddings to the vectorstore. Args: texts: Iterable of strings to add to the vectorstore. embeddings: List of list of embedding vectors. metadatas: List of metadatas associated with the texts. 
kwargs: vectorstore specific parameters """ if ids is None: ids = [str(uuid.uuid4()) for _ in texts] if not metadatas: metadatas = [{} for _ in texts] records = list(zip(ids, metadatas, texts, embeddings)) await self.async_client.upsert(records) return ids def add_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, **kwargs: Any, ) -> List[str]: """Run more texts through the embeddings and add to the vectorstore. Args: texts: Iterable of strings to add to the vectorstore. metadatas: Optional list of metadatas associated with the texts. kwargs: vectorstore specific parameters Returns: List of ids from adding the texts into the vectorstore. """ embeddings = self.embedding.embed_documents(list(texts)) return self.add_embeddings( texts=texts, embeddings=embeddings, metadatas=metadatas, ids=ids, **kwargs ) async def aadd_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, **kwargs: Any, ) -> List[str]: """Run more texts through the embeddings and add to the vectorstore. Args: texts: Iterable of strings to add to the vectorstore. metadatas: Optional list of metadatas associated with the texts. kwargs: vectorstore specific parameters Returns: List of ids from adding the texts into the vectorstore. """ embeddings = self.embedding.embed_documents(list(texts)) return await self.aadd_embeddings( texts=texts, embeddings=embeddings, metadatas=metadatas, ids=ids, **kwargs ) def _embed_query(self, query: str) -> Optional[List[float]]: # an empty query should not be embedded if query is None or query == "" or query.isspace(): return None else: return self.embedding.embed_query(query) def similarity_search( self, query: str, k: int = 4, filter: Optional[Union[dict, list]] = None, predicates: Optional[Predicates] = None, **kwargs: Any, ) -> List[Document]: """Run similarity search with TimescaleVector with distance. Args: query (str): Query text to search for. k (int): Number of results to return. Defaults to 4. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. Returns: List of Documents most similar to the query. """ embedding = self._embed_query(query) return self.similarity_search_by_vector( embedding=embedding, k=k, filter=filter, predicates=predicates, **kwargs, ) async def asimilarity_search( self, query: str, k: int = 4, filter: Optional[Union[dict, list]] = None, predicates: Optional[Predicates] = None, **kwargs: Any, ) -> List[Document]: """Run similarity search with TimescaleVector with distance. Args: query (str): Query text to search for. k (int): Number of results to return. Defaults to 4. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. Returns: List of Documents most similar to the query. """ embedding = self._embed_query(query) return await self.asimilarity_search_by_vector( embedding=embedding, k=k, filter=filter, predicates=predicates, **kwargs, ) def similarity_search_with_score( self, query: str, k: int = 4, filter: Optional[Union[dict, list]] = None, predicates: Optional[Predicates] = None, **kwargs: Any, ) -> List[Tuple[Document, float]]: """Return docs most similar to query. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. 
Returns: List of Documents most similar to the query and score for each """ embedding = self._embed_query(query) docs = self.similarity_search_with_score_by_vector( embedding=embedding, k=k, filter=filter, predicates=predicates, **kwargs, ) return docs async def asimilarity_search_with_score( self, query: str, k: int = 4, filter: Optional[Union[dict, list]] = None, predicates: Optional[Predicates] = None, **kwargs: Any, ) -> List[Tuple[Document, float]]: """Return docs most similar to query. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. Returns: List of Documents most similar to the query and score for each """ embedding = self._embed_query(query) return await self.asimilarity_search_with_score_by_vector( embedding=embedding, k=k, filter=filter, predicates=predicates, **kwargs, ) def date_to_range_filter(self, **kwargs: Any) -> Any: constructor_args = { key: kwargs[key] for key in [ "start_date", "end_date", "time_delta", "start_inclusive", "end_inclusive", ] if key in kwargs } if not constructor_args or len(constructor_args) == 0: return None try: from timescale_vector import client except ImportError: raise ImportError( "Could not import timescale_vector python package. " "Please install it with `pip install timescale-vector`." ) return client.UUIDTimeRange(**constructor_args) def similarity_search_with_score_by_vector( self, embedding: Optional[List[float]], k: int = 4, filter: Optional[Union[dict, list]] = None, predicates: Optional[Predicates] = None, **kwargs: Any, ) -> List[Tuple[Document, float]]: try: from timescale_vector import client except ImportError: raise ImportError( "Could not import timescale_vector python package. " "Please install it with `pip install timescale-vector`." ) results = self.sync_client.search( embedding, limit=k, filter=filter, predicates=predicates, uuid_time_filter=self.date_to_range_filter(**kwargs), ) docs = [ ( Document( page_content=result[client.SEARCH_RESULT_CONTENTS_IDX], metadata=result[client.SEARCH_RESULT_METADATA_IDX], ), result[client.SEARCH_RESULT_DISTANCE_IDX], ) for result in results ] return docs async def asimilarity_search_with_score_by_vector( self, embedding: Optional[List[float]], k: int = 4, filter: Optional[Union[dict, list]] = None, predicates: Optional[Predicates] = None, **kwargs: Any, ) -> List[Tuple[Document, float]]: try: from timescale_vector import client except ImportError: raise ImportError( "Could not import timescale_vector python package. " "Please install it with `pip install timescale-vector`." ) results = await self.async_client.search( embedding, limit=k, filter=filter, predicates=predicates, uuid_time_filter=self.date_to_range_filter(**kwargs), ) docs = [ ( Document( page_content=result[client.SEARCH_RESULT_CONTENTS_IDX], metadata=result[client.SEARCH_RESULT_METADATA_IDX], ), result[client.SEARCH_RESULT_DISTANCE_IDX], ) for result in results ] return docs def similarity_search_by_vector( self, embedding: Optional[List[float]], k: int = 4, filter: Optional[Union[dict, list]] = None, predicates: Optional[Predicates] = None, **kwargs: Any, ) -> List[Document]: """Return docs most similar to embedding vector. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. Returns: List of Documents most similar to the query vector. 
""" docs_and_scores = self.similarity_search_with_score_by_vector( embedding=embedding, k=k, filter=filter, predicates=predicates, **kwargs ) return [doc for doc, _ in docs_and_scores] async def asimilarity_search_by_vector( self, embedding: Optional[List[float]], k: int = 4, filter: Optional[Union[dict, list]] = None, predicates: Optional[Predicates] = None, **kwargs: Any, ) -> List[Document]: """Return docs most similar to embedding vector. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. Returns: List of Documents most similar to the query vector. """ docs_and_scores = await self.asimilarity_search_with_score_by_vector( embedding=embedding, k=k, filter=filter, predicates=predicates, **kwargs ) return [doc for doc, _ in docs_and_scores] @classmethod def from_texts( cls: Type[TimescaleVector], texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME, distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY, ids: Optional[List[str]] = None, pre_delete_collection: bool = False, **kwargs: Any, ) -> TimescaleVector: """ Return VectorStore initialized from texts and embeddings. Postgres connection string is required "Either pass it as a parameter or set the TIMESCALE_SERVICE_URL environment variable. """ embeddings = embedding.embed_documents(list(texts)) return cls.__from( texts, embeddings, embedding, metadatas=metadatas, ids=ids, collection_name=collection_name, distance_strategy=distance_strategy, pre_delete_collection=pre_delete_collection, **kwargs, ) @classmethod async def afrom_texts( cls: Type[TimescaleVector], texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME, distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY, ids: Optional[List[str]] = None, pre_delete_collection: bool = False, **kwargs: Any, ) -> TimescaleVector: """ Return VectorStore initialized from texts and embeddings. Postgres connection string is required "Either pass it as a parameter or set the TIMESCALE_SERVICE_URL environment variable. """ embeddings = embedding.embed_documents(list(texts)) return await cls.__afrom( texts, embeddings, embedding, metadatas=metadatas, ids=ids, collection_name=collection_name, distance_strategy=distance_strategy, pre_delete_collection=pre_delete_collection, **kwargs, ) @classmethod def from_embeddings( cls, text_embeddings: List[Tuple[str, List[float]]], embedding: Embeddings, metadatas: Optional[List[dict]] = None, collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME, distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY, ids: Optional[List[str]] = None, pre_delete_collection: bool = False, **kwargs: Any, ) -> TimescaleVector: """Construct TimescaleVector wrapper from raw documents and pre- generated embeddings. Return VectorStore initialized from documents and embeddings. Postgres connection string is required "Either pass it as a parameter or set the TIMESCALE_SERVICE_URL environment variable. Example: .. 
code-block:: python from langchain_community.vectorstores import TimescaleVector from langchain_community.embeddings import OpenAIEmbeddings embeddings = OpenAIEmbeddings() text_embeddings = embeddings.embed_documents(texts) text_embedding_pairs = list(zip(texts, text_embeddings)) tvs = TimescaleVector.from_embeddings(text_embedding_pairs, embeddings) """ texts = [t[0] for t in text_embeddings] embeddings = [t[1] for t in text_embeddings] return cls.__from( texts, embeddings, embedding, metadatas=metadatas, ids=ids, collection_name=collection_name, distance_strategy=distance_strategy, pre_delete_collection=pre_delete_collection, **kwargs, ) @classmethod async def afrom_embeddings( cls, text_embeddings: List[Tuple[str, List[float]]], embedding: Embeddings, metadatas: Optional[List[dict]] = None, collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME, distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY, ids: Optional[List[str]] = None, pre_delete_collection: bool = False, **kwargs: Any, ) -> TimescaleVector: """Construct TimescaleVector wrapper from raw documents and pre- generated embeddings. Return VectorStore initialized from documents and embeddings. Postgres connection string is required "Either pass it as a parameter or set the TIMESCALE_SERVICE_URL environment variable. Example: .. code-block:: python from langchain_community.vectorstores import TimescaleVector from langchain_community.embeddings import OpenAIEmbeddings embeddings = OpenAIEmbeddings() text_embeddings = embeddings.embed_documents(texts) text_embedding_pairs = list(zip(texts, text_embeddings)) tvs = TimescaleVector.from_embeddings(text_embedding_pairs, embeddings) """ texts = [t[0] for t in text_embeddings] embeddings = [t[1] for t in text_embeddings] return await cls.__afrom( texts, embeddings, embedding, metadatas=metadatas, ids=ids, collection_name=collection_name, distance_strategy=distance_strategy, pre_delete_collection=pre_delete_collection, **kwargs, ) @classmethod def from_existing_index( cls: Type[TimescaleVector], embedding: Embeddings, collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME, distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY, pre_delete_collection: bool = False, **kwargs: Any, ) -> TimescaleVector: """ Get instance of an existing TimescaleVector store.This method will return the instance of the store without inserting any new embeddings """ service_url = cls.get_service_url(kwargs) store = cls( service_url=service_url, collection_name=collection_name, embedding=embedding, distance_strategy=distance_strategy, pre_delete_collection=pre_delete_collection, ) return store @classmethod def get_service_url(cls, kwargs: Dict[str, Any]) -> str: service_url: str = get_from_dict_or_env( data=kwargs, key="service_url", env_key="TIMESCALE_SERVICE_URL", ) if not service_url: raise ValueError( "Postgres connection string is required" "Either pass it as a parameter" "or set the TIMESCALE_SERVICE_URL environment variable." ) return service_url @classmethod def service_url_from_db_params( cls, host: str, port: int, database: str, user: str, password: str, ) -> str: """Return connection string from database parameters.""" return f"postgresql://{user}:{password}@{host}:{port}/{database}" def _select_relevance_score_fn(self) -> Callable[[float], float]: """ The 'correct' relevance function may differ depending on a few things, including: - the distance / similarity metric used by the VectorStore - the scale of your embeddings (OpenAI's are unit normed. Many others are not!) 
        - embedding dimensionality
        - etc.
        """
        if self.override_relevance_score_fn is not None:
            return self.override_relevance_score_fn

        # Default strategy is to rely on distance strategy provided
        # in vectorstore constructor
        if self._distance_strategy == DistanceStrategy.COSINE:
            return self._cosine_relevance_score_fn
        elif self._distance_strategy == DistanceStrategy.EUCLIDEAN_DISTANCE:
            return self._euclidean_relevance_score_fn
        elif self._distance_strategy == DistanceStrategy.MAX_INNER_PRODUCT:
            return self._max_inner_product_relevance_score_fn
        else:
            raise ValueError(
                "No supported normalization function"
                f" for distance_strategy of {self._distance_strategy}. "
                "Consider providing relevance_score_fn to TimescaleVector "
                "constructor."
            )

    def delete(self, ids: Optional[List[str]] = None, **kwargs: Any) -> Optional[bool]:
        """Delete by vector ID.

        Args:
            ids: List of ids to delete.
            **kwargs: Other keyword arguments that subclasses might use.

        Returns:
            Optional[bool]: True if deletion is successful,
            False otherwise, None if not implemented.
        """
        if ids is None:
            raise ValueError("No ids provided to delete.")
        self.sync_client.delete_by_ids(ids)
        return True

    # TODO: should this be part of delete()?
    def delete_by_metadata(
        self, filter: Union[Dict[str, str], List[Dict[str, str]]], **kwargs: Any
    ) -> Optional[bool]:
        """Delete records matching a metadata filter.

        Args:
            filter: Metadata filter (or list of filters) selecting the records
                to delete.
            **kwargs: Other keyword arguments that subclasses might use.

        Returns:
            Optional[bool]: True if deletion is successful,
            False otherwise, None if not implemented.
        """
        self.sync_client.delete_by_metadata(filter)
        return True

    class IndexType(str, enum.Enum):
        """Enumerator for the supported index types."""

        TIMESCALE_VECTOR = "tsv"
        PGVECTOR_IVFFLAT = "ivfflat"
        PGVECTOR_HNSW = "hnsw"

    DEFAULT_INDEX_TYPE = IndexType.TIMESCALE_VECTOR

    def create_index(
        self, index_type: Union[IndexType, str] = DEFAULT_INDEX_TYPE, **kwargs: Any
    ) -> None:
        try:
            from timescale_vector import client
        except ImportError:
            raise ImportError(
                "Could not import timescale_vector python package. "
                "Please install it with `pip install timescale-vector`."
            )

        index_type = (
            index_type.value if isinstance(index_type, self.IndexType) else index_type
        )
        if index_type == self.IndexType.PGVECTOR_IVFFLAT.value:
            self.sync_client.create_embedding_index(client.IvfflatIndex(**kwargs))

        if index_type == self.IndexType.PGVECTOR_HNSW.value:
            self.sync_client.create_embedding_index(client.HNSWIndex(**kwargs))

        if index_type == self.IndexType.TIMESCALE_VECTOR.value:
            self.sync_client.create_embedding_index(
                client.TimescaleVectorIndex(**kwargs)
            )

    def drop_index(self) -> None:
        self.sync_client.drop_embedding_index()
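End to end, the store above is typically driven through from_texts, an explicit index build, and a scored search. The sketch below is a minimal illustration under stated assumptions: the service URL is a placeholder, and FakeEmbeddings merely stands in for a real embedding model.

from langchain_community.embeddings import FakeEmbeddings
from langchain_community.vectorstores.timescalevector import TimescaleVector

# Placeholder connection string; in practice use service_url_from_db_params()
# or set the TIMESCALE_SERVICE_URL environment variable.
SERVICE_URL = "postgresql://user:password@localhost:5432/tsdb"

store = TimescaleVector.from_texts(
    texts=["foo", "bar"],
    embedding=FakeEmbeddings(size=1536),  # stand-in for a real embedding model
    collection_name="example_collection",
    service_url=SERVICE_URL,
)
store.create_index(index_type="tsv")  # IndexType.TIMESCALE_VECTOR
docs_and_scores = store.similarity_search_with_score("foo", k=2)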
0
lc_public_repos/langchain/libs/community/langchain_community
lc_public_repos/langchain/libs/community/langchain_community/vectorstores/vikingdb.py
from __future__ import annotations import logging import uuid from typing import Any, List, Optional, Tuple import numpy as np from langchain_core.documents import Document from langchain_core.embeddings import Embeddings from langchain_core.vectorstores import VectorStore from langchain_community.vectorstores.utils import maximal_marginal_relevance logger = logging.getLogger(__name__) class VikingDBConfig(object): """vikingdb connection config See the following documentation for details: https://www.volcengine.com/docs/6459/1167770 Attribute: host(str):The access address of the vector database server that the client needs to connect to. region(str):"cn-shanghai" or "cn-beijing" ak(str):Access Key ID, security credentials for accessing Volcano Engine services. sk(str):Secret Access Key, security credentials for accessing Volcano Engine services. scheme(str):http or https, defaulting to http. """ def __init__(self, host="host", region="region", ak="ak", sk="sk", scheme="http"): # type: ignore[no-untyped-def] self.host = host self.region = region self.ak = ak self.sk = sk self.scheme = scheme class VikingDB(VectorStore): """vikingdb as a vector store In order to use this you need to have a database instance. See the following documentation for details: https://www.volcengine.com/docs/6459/1167774 """ def __init__( self, embedding_function: Embeddings, collection_name: str = "LangChainCollection", connection_args: Optional[VikingDBConfig] = None, index_params: Optional[dict] = None, drop_old: Optional[bool] = False, **kwargs: Any, ): try: from volcengine.viking_db import Collection, VikingDBService except ImportError: raise ImportError( "Could not import volcengine python package. " "Please install it with `pip install --upgrade volcengine`." ) self.embedding_func = embedding_function self.collection_name = collection_name self.index_name = "LangChainIndex" self.connection_args = connection_args self.index_params = index_params self.drop_old = drop_old self.service = VikingDBService( connection_args.host, # type: ignore[union-attr] connection_args.region, # type: ignore[union-attr] connection_args.ak, # type: ignore[union-attr] connection_args.sk, # type: ignore[union-attr] connection_args.scheme, # type: ignore[union-attr] ) try: col = self.service.get_collection(collection_name) except Exception: col = None self.collection = col self.index = None if self.collection is not None: self.index = self.service.get_index(self.collection_name, self.index_name) if drop_old and isinstance(self.collection, Collection): indexes = self.service.list_indexes(collection_name) for index in indexes: self.service.drop_index(collection_name, index.index_name) self.service.drop_collection(collection_name) self.collection = None self.index = None @property def embeddings(self) -> Embeddings: return self.embedding_func def _create_collection( self, embeddings: List, metadatas: Optional[List[dict]] = None ) -> None: try: from volcengine.viking_db import Field, FieldType except ImportError: raise ImportError( "Could not import volcengine python package. " "Please install it with `pip install --upgrade volcengine`." 
) dim = len(embeddings[0]) fields = [] if metadatas: for key, value in metadatas[0].items(): # print(key, value) if isinstance(value, str): fields.append(Field(key, FieldType.String)) elif isinstance(value, int): fields.append(Field(key, FieldType.Int64)) elif isinstance(value, bool): fields.append(Field(key, FieldType.Bool)) elif isinstance(value, list) and all( isinstance(item, str) for item in value ): fields.append(Field(key, FieldType.List_String)) elif isinstance(value, list) and all( isinstance(item, int) for item in value ): fields.append(Field(key, FieldType.List_Int64)) elif isinstance(value, bytes): fields.append(Field(key, FieldType.Text)) else: raise ValueError( "metadatas value is invalid" "please change the type of metadatas." ) # fields.append(Field("text", FieldType.String)) fields.append(Field("text", FieldType.Text)) fields.append(Field("primary_key", FieldType.String, is_primary_key=True)) fields.append(Field("vector", FieldType.Vector, dim=dim)) self.collection = self.service.create_collection(self.collection_name, fields) def _create_index(self) -> None: try: from volcengine.viking_db import VectorIndexParams except ImportError: raise ImportError( "Could not import volcengine python package. " "Please install it with `pip install --upgrade volcengine`." ) cpu_quota = 2 vector_index = VectorIndexParams() partition_by = "" scalar_index = None if self.index_params is not None: if self.index_params.get("cpu_quota") is not None: cpu_quota = self.index_params["cpu_quota"] if self.index_params.get("vector_index") is not None: vector_index = self.index_params["vector_index"] if self.index_params.get("partition_by") is not None: partition_by = self.index_params["partition_by"] if self.index_params.get("scalar_index") is not None: scalar_index = self.index_params["scalar_index"] self.index = self.service.create_index( self.collection_name, self.index_name, vector_index=vector_index, cpu_quota=cpu_quota, partition_by=partition_by, scalar_index=scalar_index, ) def add_texts( # type: ignore[override] self, texts: List[str], metadatas: Optional[List[dict]] = None, batch_size: int = 1000, **kwargs: Any, ) -> List[str]: """Insert text data into VikingDB.""" try: from volcengine.viking_db import Data except ImportError: raise ImportError( "Could not import volcengine python package. " "Please install it with `pip install --upgrade volcengine`." 
) texts = list(texts) try: embeddings = self.embedding_func.embed_documents(texts) except NotImplementedError: embeddings = [self.embedding_func.embed_query(x) for x in texts] if len(embeddings) == 0: logger.debug("Nothing to insert, skipping.") return [] if self.collection is None: self._create_collection(embeddings, metadatas) self._create_index() # insert data data = [] pks: List[str] = [] for index in range(len(embeddings)): primary_key = str(uuid.uuid4()) pks.append(primary_key) field = { "text": texts[index], "primary_key": primary_key, "vector": embeddings[index], } if metadatas is not None and index < len(metadatas): names = list(metadatas[index].keys()) for name in names: field[name] = metadatas[index].get(name) # type: ignore[assignment] data.append(Data(field)) total_count = len(data) for i in range(0, total_count, batch_size): end = min(i + batch_size, total_count) insert_data = data[i:end] # print(insert_data) self.collection.upsert_data(insert_data) # type: ignore[union-attr] return pks def similarity_search( # type: ignore[override] self, query: str, params: Optional[dict] = None, **kwargs: Any, ) -> List[Document]: """Perform a similarity search against the query string.""" res = self.similarity_search_with_score(query=query, params=params, **kwargs) return [doc for doc, _ in res] def similarity_search_with_score( self, query: str, params: Optional[dict] = None, **kwargs: Any, ) -> List[Tuple[Document, float]]: """Perform a search on a query string and return results with score.""" embedding = self.embedding_func.embed_query(query) res = self.similarity_search_with_score_by_vector( embedding=embedding, params=params, **kwargs ) return res def similarity_search_by_vector( # type: ignore[override] self, embedding: List[float], params: Optional[dict] = None, **kwargs: Any, ) -> List[Document]: """Perform a similarity search against the query string.""" res = self.similarity_search_with_score_by_vector( embedding=embedding, params=params, **kwargs ) return [doc for doc, _ in res] def similarity_search_with_score_by_vector( self, embedding: List[float], params: Optional[dict] = None, **kwargs: Any, ) -> List[Tuple[Document, float]]: """Perform a search on a query string and return results with score.""" if self.collection is None: logger.debug("No existing collection to search.") return [] filter = None limit = 10 output_fields = None partition = "default" if params is not None: if params.get("filter") is not None: filter = params["filter"] if params.get("limit") is not None: limit = params["limit"] if params.get("output_fields") is not None: output_fields = params["output_fields"] if params.get("partition") is not None: partition = params["partition"] res = self.index.search_by_vector( # type: ignore[union-attr] embedding, filter=filter, limit=limit, output_fields=output_fields, partition=partition, ) ret = [] for item in res: if "primary_key" in item.fields: item.fields.pop("primary_key") if "vector" in item.fields: item.fields.pop("vector") page_content = "" if "text" in item.fields: page_content = item.fields.pop("text") doc = Document(page_content=page_content, metadata=item.fields) pair = (doc, item.score) ret.append(pair) return ret def max_marginal_relevance_search( # type: ignore[override] self, query: str, k: int = 4, lambda_mult: float = 0.5, params: Optional[dict] = None, **kwargs: Any, ) -> List[Document]: """Perform a search and return results that are reordered by MMR.""" embedding = self.embedding_func.embed_query(query) return 
self.max_marginal_relevance_search_by_vector(
            embedding=embedding,
            k=k,
            lambda_mult=lambda_mult,
            params=params,
            **kwargs,
        )

    def max_marginal_relevance_search_by_vector(  # type: ignore[override]
        self,
        embedding: List[float],
        k: int = 4,
        lambda_mult: float = 0.5,
        params: Optional[dict] = None,
        **kwargs: Any,
    ) -> List[Document]:
        """Perform a search and return results that are reordered by MMR."""
        if self.collection is None:
            logger.debug("No existing collection to search.")
            return []
        filter = None
        limit = 10
        output_fields = None
        partition = "default"
        if params is not None:
            if params.get("filter") is not None:
                filter = params["filter"]
            if params.get("limit") is not None:
                limit = params["limit"]
            if params.get("output_fields") is not None:
                output_fields = params["output_fields"]
            if params.get("partition") is not None:
                partition = params["partition"]

        res = self.index.search_by_vector(  # type: ignore[union-attr]
            embedding,
            filter=filter,
            limit=limit,
            output_fields=output_fields,
            partition=partition,
        )
        documents = []
        ordered_result_embeddings = []
        for item in res:
            if (
                "vector" not in item.fields
                or "primary_key" not in item.fields
                or "text" not in item.fields
            ):
                continue
            ordered_result_embeddings.append(item.fields.pop("vector"))
            item.fields.pop("primary_key")
            page_content = item.fields.pop("text")
            doc = Document(page_content=page_content, metadata=item.fields)
            documents.append(doc)

        new_ordering = maximal_marginal_relevance(
            np.array(embedding), ordered_result_embeddings, k=k, lambda_mult=lambda_mult
        )
        # Reorder the values and return.
        ret = []
        for x in new_ordering:
            # Function can return -1 index
            if x == -1:
                break
            else:
                ret.append(documents[x])
        return ret

    def delete(
        self,
        ids: Optional[List[str]] = None,
        **kwargs: Any,
    ) -> None:
        """Delete entities by their ids."""
        if self.collection is None:
            # Nothing to delete from; return instead of dereferencing None.
            logger.debug("No existing collection to delete from.")
            return
        self.collection.delete_data(ids)

    @classmethod
    def from_texts(  # type: ignore[no-untyped-def, override]
        cls,
        texts: List[str],
        embedding: Embeddings,
        connection_args: Optional[VikingDBConfig] = None,
        metadatas: Optional[List[dict]] = None,
        collection_name: str = "LangChainCollection",
        index_params: Optional[dict] = None,
        drop_old: bool = False,
        **kwargs: Any,
    ):
        """Create a collection, index it, and insert data."""
        if connection_args is None:
            raise Exception("A VikingDBConfig must be provided as connection_args")
        vector_db = cls(
            embedding_function=embedding,
            collection_name=collection_name,
            connection_args=connection_args,
            index_params=index_params,
            drop_old=drop_old,
            **kwargs,
        )
        vector_db.add_texts(texts=texts, metadatas=metadatas)
        return vector_db
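A short driver for the class above may help; every credential and endpoint below is a placeholder assumption, and FakeEmbeddings stands in for a real model.

from langchain_community.embeddings import FakeEmbeddings
from langchain_community.vectorstores.vikingdb import VikingDB, VikingDBConfig

config = VikingDBConfig(
    host="<vikingdb-host>",  # placeholder endpoint
    region="cn-beijing",
    ak="<access-key>",
    sk="<secret-key>",
    scheme="https",
)
db = VikingDB.from_texts(
    texts=["foo", "bar"],
    embedding=FakeEmbeddings(size=768),
    connection_args=config,
)
# `params` is forwarded to the index search (filter/limit/output_fields/partition);
# server-side index creation may take a moment before searches succeed.
docs = db.similarity_search("foo", params={"limit": 2})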
0
lc_public_repos/langchain/libs/community/langchain_community
lc_public_repos/langchain/libs/community/langchain_community/vectorstores/hologres.py
from __future__ import annotations import logging import uuid from typing import Any, Dict, Iterable, List, Optional, Tuple, Type from langchain_core.documents import Document from langchain_core.embeddings import Embeddings from langchain_core.utils import get_from_dict_or_env from langchain_core.vectorstores import VectorStore ADA_TOKEN_COUNT = 1536 _LANGCHAIN_DEFAULT_TABLE_NAME = "langchain_pg_embedding" class Hologres(VectorStore): """`Hologres API` vector store. - `connection_string` is a hologres connection string. - `embedding_function` any embedding function implementing `langchain.embeddings.base.Embeddings` interface. - `ndims` is the number of dimensions of the embedding output. - `table_name` is the name of the table to store embeddings and data. (default: langchain_pg_embedding) - NOTE: The table will be created when initializing the store (if not exists) So, make sure the user has the right permissions to create tables. - `pre_delete_table` if True, will delete the table if it exists. (default: False) - Useful for testing. """ def __init__( self, connection_string: str, embedding_function: Embeddings, ndims: int = ADA_TOKEN_COUNT, table_name: str = _LANGCHAIN_DEFAULT_TABLE_NAME, pre_delete_table: bool = False, logger: Optional[logging.Logger] = None, ) -> None: self.connection_string = connection_string self.ndims = ndims self.table_name = table_name self.embedding_function = embedding_function self.pre_delete_table = pre_delete_table self.logger = logger or logging.getLogger(__name__) self.__post_init__() def __post_init__( self, ) -> None: """ Initialize the store. """ from hologres_vector import HologresVector self.storage = HologresVector( self.connection_string, ndims=self.ndims, table_name=self.table_name, table_schema={"document": "text"}, pre_delete_table=self.pre_delete_table, ) @property def embeddings(self) -> Embeddings: return self.embedding_function @classmethod def __from( cls, texts: List[str], embeddings: List[List[float]], embedding_function: Embeddings, metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, ndims: int = ADA_TOKEN_COUNT, table_name: str = _LANGCHAIN_DEFAULT_TABLE_NAME, pre_delete_table: bool = False, **kwargs: Any, ) -> Hologres: if ids is None: ids = [str(uuid.uuid4()) for _ in texts] if not metadatas: metadatas = [{} for _ in texts] connection_string = cls.get_connection_string(kwargs) store = cls( connection_string=connection_string, embedding_function=embedding_function, ndims=ndims, table_name=table_name, pre_delete_table=pre_delete_table, ) store.add_embeddings( texts=texts, embeddings=embeddings, metadatas=metadatas, ids=ids, **kwargs ) return store def add_embeddings( self, texts: Iterable[str], embeddings: List[List[float]], metadatas: List[dict], ids: List[str], **kwargs: Any, ) -> None: """Add embeddings to the vectorstore. Args: texts: Iterable of strings to add to the vectorstore. embeddings: List of list of embedding vectors. metadatas: List of metadatas associated with the texts. kwargs: vectorstore specific parameters """ try: schema_datas = [{"document": t} for t in texts] self.storage.upsert_vectors(embeddings, ids, metadatas, schema_datas) except Exception as e: self.logger.exception(e) def add_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, **kwargs: Any, ) -> List[str]: """Run more texts through the embeddings and add to the vectorstore. Args: texts: Iterable of strings to add to the vectorstore. 
metadatas: Optional list of metadatas associated with the texts. kwargs: vectorstore specific parameters Returns: List of ids from adding the texts into the vectorstore. """ if ids is None: ids = [str(uuid.uuid4()) for _ in texts] embeddings = self.embedding_function.embed_documents(list(texts)) if not metadatas: metadatas = [{} for _ in texts] self.add_embeddings(texts, embeddings, metadatas, ids, **kwargs) return ids def similarity_search( self, query: str, k: int = 4, filter: Optional[dict] = None, **kwargs: Any, ) -> List[Document]: """Run similarity search with Hologres with distance. Args: query (str): Query text to search for. k (int): Number of results to return. Defaults to 4. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. Returns: List of Documents most similar to the query. """ embedding = self.embedding_function.embed_query(text=query) return self.similarity_search_by_vector( embedding=embedding, k=k, filter=filter, ) def similarity_search_by_vector( self, embedding: List[float], k: int = 4, filter: Optional[dict] = None, **kwargs: Any, ) -> List[Document]: """Return docs most similar to embedding vector. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. Returns: List of Documents most similar to the query vector. """ docs_and_scores = self.similarity_search_with_score_by_vector( embedding=embedding, k=k, filter=filter ) return [doc for doc, _ in docs_and_scores] def similarity_search_with_score( self, query: str, k: int = 4, filter: Optional[dict] = None, ) -> List[Tuple[Document, float]]: """Return docs most similar to query. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. Returns: List of Documents most similar to the query and score for each """ embedding = self.embedding_function.embed_query(query) docs = self.similarity_search_with_score_by_vector( embedding=embedding, k=k, filter=filter ) return docs def similarity_search_with_score_by_vector( self, embedding: List[float], k: int = 4, filter: Optional[dict] = None, ) -> List[Tuple[Document, float]]: results: List[dict[str, Any]] = self.storage.search( embedding, k=k, select_columns=["document"], metadata_filters=filter ) docs = [ ( Document( page_content=result["document"], metadata=result["metadata"], ), result["distance"], ) for result in results ] return docs @classmethod def from_texts( cls: Type[Hologres], texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, ndims: int = ADA_TOKEN_COUNT, table_name: str = _LANGCHAIN_DEFAULT_TABLE_NAME, ids: Optional[List[str]] = None, pre_delete_table: bool = False, **kwargs: Any, ) -> Hologres: """ Return VectorStore initialized from texts and embeddings. Hologres connection string is required "Either pass it as a parameter or set the HOLOGRES_CONNECTION_STRING environment variable. 
Create the connection string by calling HologresVector.connection_string_from_db_params """ embeddings = embedding.embed_documents(list(texts)) return cls.__from( texts, embeddings, embedding, metadatas=metadatas, ids=ids, ndims=ndims, table_name=table_name, pre_delete_table=pre_delete_table, **kwargs, ) @classmethod def from_embeddings( cls, text_embeddings: List[Tuple[str, List[float]]], embedding: Embeddings, metadatas: Optional[List[dict]] = None, ndims: int = ADA_TOKEN_COUNT, table_name: str = _LANGCHAIN_DEFAULT_TABLE_NAME, ids: Optional[List[str]] = None, pre_delete_table: bool = False, **kwargs: Any, ) -> Hologres: """Construct Hologres wrapper from raw documents and pre- generated embeddings. Return VectorStore initialized from documents and embeddings. Hologres connection string is required "Either pass it as a parameter or set the HOLOGRES_CONNECTION_STRING environment variable. Create the connection string by calling HologresVector.connection_string_from_db_params Example: .. code-block:: python from langchain_community.vectorstores import Hologres from langchain_community.embeddings import OpenAIEmbeddings embeddings = OpenAIEmbeddings() text_embeddings = embeddings.embed_documents(texts) text_embedding_pairs = list(zip(texts, text_embeddings)) faiss = Hologres.from_embeddings(text_embedding_pairs, embeddings) """ texts = [t[0] for t in text_embeddings] embeddings = [t[1] for t in text_embeddings] return cls.__from( texts, embeddings, embedding, metadatas=metadatas, ids=ids, ndims=ndims, table_name=table_name, pre_delete_table=pre_delete_table, **kwargs, ) @classmethod def from_existing_index( cls: Type[Hologres], embedding: Embeddings, ndims: int = ADA_TOKEN_COUNT, table_name: str = _LANGCHAIN_DEFAULT_TABLE_NAME, pre_delete_table: bool = False, **kwargs: Any, ) -> Hologres: """ Get instance of an existing Hologres store.This method will return the instance of the store without inserting any new embeddings """ connection_string = cls.get_connection_string(kwargs) store = cls( connection_string=connection_string, ndims=ndims, table_name=table_name, embedding_function=embedding, pre_delete_table=pre_delete_table, ) return store @classmethod def get_connection_string(cls, kwargs: Dict[str, Any]) -> str: connection_string: str = get_from_dict_or_env( data=kwargs, key="connection_string", env_key="HOLOGRES_CONNECTION_STRING", ) if not connection_string: raise ValueError( "Hologres connection string is required" "Either pass it as a parameter" "or set the HOLOGRES_CONNECTION_STRING environment variable." "Create the connection string by calling" "HologresVector.connection_string_from_db_params" ) return connection_string @classmethod def from_documents( cls: Type[Hologres], documents: List[Document], embedding: Embeddings, ndims: int = ADA_TOKEN_COUNT, table_name: str = _LANGCHAIN_DEFAULT_TABLE_NAME, ids: Optional[List[str]] = None, pre_delete_collection: bool = False, **kwargs: Any, ) -> Hologres: """ Return VectorStore initialized from documents and embeddings. Hologres connection string is required "Either pass it as a parameter or set the HOLOGRES_CONNECTION_STRING environment variable. 
Create the connection string by calling HologresVector.connection_string_from_db_params """ texts = [d.page_content for d in documents] metadatas = [d.metadata for d in documents] connection_string = cls.get_connection_string(kwargs) kwargs["connection_string"] = connection_string return cls.from_texts( texts=texts, pre_delete_collection=pre_delete_collection, embedding=embedding, metadatas=metadatas, ids=ids, ndims=ndims, table_name=table_name, **kwargs, ) @classmethod def connection_string_from_db_params( cls, host: str, port: int, database: str, user: str, password: str, ) -> str: """Return connection string from database parameters.""" return ( f"dbname={database} user={user} password={password} host={host} port={port}" )
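Since the connection string is the only non-obvious piece of wiring, a compact sketch follows; host, credentials, and the FakeEmbeddings stand-in are assumptions, not values from this module.

from langchain_community.embeddings import FakeEmbeddings
from langchain_community.vectorstores.hologres import Hologres

conn_str = Hologres.connection_string_from_db_params(
    host="<hologres-host>",  # placeholder
    port=80,
    database="langchain",
    user="<user>",
    password="<password>",
)
store = Hologres.from_texts(
    texts=["foo", "bar"],
    embedding=FakeEmbeddings(size=1536),  # must match ndims (default 1536)
    connection_string=conn_str,
)
docs = store.similarity_search("foo", k=2)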
0
lc_public_repos/langchain/libs/community/langchain_community
lc_public_repos/langchain/libs/community/langchain_community/vectorstores/sqlitevec.py
from __future__ import annotations import json import logging import struct import warnings from typing import ( TYPE_CHECKING, Any, Iterable, List, Optional, Tuple, Type, ) from langchain_core.documents import Document from langchain_core.embeddings import Embeddings from langchain_core.vectorstores import VectorStore if TYPE_CHECKING: import sqlite3 logger = logging.getLogger(__name__) def serialize_f32(vector: List[float]) -> bytes: """Serializes a list of floats into a compact "raw bytes" format Source: https://github.com/asg017/sqlite-vec/blob/21c5a14fc71c83f135f5b00c84115139fd12c492/examples/simple-python/demo.py#L8-L10 """ return struct.pack("%sf" % len(vector), *vector) class SQLiteVec(VectorStore): """SQLite with Vec extension as a vector database. To use, you should have the ``sqlite-vec`` python package installed. Example: .. code-block:: python from langchain_community.vectorstores import SQLiteVec from langchain_community.embeddings.openai import OpenAIEmbeddings ... """ def __init__( self, table: str, connection: Optional[sqlite3.Connection], embedding: Embeddings, db_file: str = "vec.db", ): """Initialize with sqlite client with vss extension.""" try: import sqlite_vec # noqa # pylint: disable=unused-import except ImportError: raise ImportError( "Could not import sqlite-vec python package. " "Please install it with `pip install sqlite-vec`." ) if not connection: connection = self.create_connection(db_file) if not isinstance(embedding, Embeddings): warnings.warn("embeddings input must be Embeddings object.") self._connection = connection self._table = table self._embedding = embedding self.create_table_if_not_exists() def create_table_if_not_exists(self) -> None: self._connection.execute( f""" CREATE TABLE IF NOT EXISTS {self._table} ( rowid INTEGER PRIMARY KEY AUTOINCREMENT, text TEXT, metadata BLOB, text_embedding BLOB ) ; """ ) self._connection.execute( f""" CREATE VIRTUAL TABLE IF NOT EXISTS {self._table}_vec USING vec0( rowid INTEGER PRIMARY KEY, text_embedding float[{self.get_dimensionality()}] ) ; """ ) self._connection.execute( f""" CREATE TRIGGER IF NOT EXISTS embed_text AFTER INSERT ON {self._table} BEGIN INSERT INTO {self._table}_vec(rowid, text_embedding) VALUES (new.rowid, new.text_embedding) ; END; """ ) self._connection.commit() def add_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, **kwargs: Any, ) -> List[str]: """Add more texts to the vectorstore index. Args: texts: Iterable of strings to add to the vectorstore. metadatas: Optional list of metadatas associated with the texts. 
kwargs: vectorstore specific parameters """ max_id = self._connection.execute( f"SELECT max(rowid) as rowid FROM {self._table}" ).fetchone()["rowid"] if max_id is None: # no text added yet max_id = 0 embeds = self._embedding.embed_documents(list(texts)) if not metadatas: metadatas = [{} for _ in texts] data_input = [ (text, json.dumps(metadata), serialize_f32(embed)) for text, metadata, embed in zip(texts, metadatas, embeds) ] self._connection.executemany( f"INSERT INTO {self._table}(text, metadata, text_embedding) " f"VALUES (?,?,?)", data_input, ) self._connection.commit() # pulling every ids we just inserted results = self._connection.execute( f"SELECT rowid FROM {self._table} WHERE rowid > {max_id}" ) return [row["rowid"] for row in results] def similarity_search_with_score_by_vector( self, embedding: List[float], k: int = 4, **kwargs: Any ) -> List[Tuple[Document, float]]: sql_query = f""" SELECT text, metadata, distance FROM {self._table} AS e INNER JOIN {self._table}_vec AS v on v.rowid = e.rowid WHERE v.text_embedding MATCH ? AND k = ? ORDER BY distance """ cursor = self._connection.cursor() cursor.execute( sql_query, [serialize_f32(embedding), k], ) results = cursor.fetchall() documents = [] for row in results: metadata = json.loads(row["metadata"]) or {} doc = Document(page_content=row["text"], metadata=metadata) documents.append((doc, row["distance"])) return documents def similarity_search( self, query: str, k: int = 4, **kwargs: Any ) -> List[Document]: """Return docs most similar to query.""" embedding = self._embedding.embed_query(query) documents = self.similarity_search_with_score_by_vector( embedding=embedding, k=k ) return [doc for doc, _ in documents] def similarity_search_with_score( self, query: str, k: int = 4, **kwargs: Any ) -> List[Tuple[Document, float]]: """Return docs most similar to query.""" embedding = self._embedding.embed_query(query) documents = self.similarity_search_with_score_by_vector( embedding=embedding, k=k ) return documents def similarity_search_by_vector( self, embedding: List[float], k: int = 4, **kwargs: Any ) -> List[Document]: documents = self.similarity_search_with_score_by_vector( embedding=embedding, k=k ) return [doc for doc, _ in documents] @classmethod def from_texts( cls: Type[SQLiteVec], texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, table: str = "langchain", db_file: str = "vec.db", **kwargs: Any, ) -> SQLiteVec: """Return VectorStore initialized from texts and embeddings.""" connection = cls.create_connection(db_file) vec = cls( table=table, connection=connection, db_file=db_file, embedding=embedding ) vec.add_texts(texts=texts, metadatas=metadatas) return vec @staticmethod def create_connection(db_file: str) -> sqlite3.Connection: import sqlite3 import sqlite_vec connection = sqlite3.connect(db_file) connection.row_factory = sqlite3.Row connection.enable_load_extension(True) sqlite_vec.load(connection) connection.enable_load_extension(False) return connection def get_dimensionality(self) -> int: """ Function that does a dummy embedding to figure out how many dimensions this embedding function returns. Needed for the virtual table DDL. """ dummy_text = "This is a dummy text" dummy_embedding = self._embedding.embed_query(dummy_text) return len(dummy_embedding)
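The trigger-based schema above is easiest to see in motion with a tiny local example. The database path is arbitrary, and FakeEmbeddings stands in for a real model, so the returned distances are not meaningful.

from langchain_community.embeddings import FakeEmbeddings
from langchain_community.vectorstores.sqlitevec import SQLiteVec

vec = SQLiteVec.from_texts(
    texts=["foo", "bar"],
    embedding=FakeEmbeddings(size=384),  # sets the float[384] virtual-table column
    table="langchain",
    db_file="/tmp/vec.db",
)
docs = vec.similarity_search("foo", k=1)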
0
lc_public_repos/langchain/libs/community/langchain_community
lc_public_repos/langchain/libs/community/langchain_community/vectorstores/aerospike.py
from __future__ import annotations import logging import uuid import warnings from typing import ( TYPE_CHECKING, Any, Callable, Iterable, List, Optional, Tuple, TypeVar, Union, ) import numpy as np from langchain_core.documents import Document from langchain_core.embeddings import Embeddings from langchain_core.vectorstores import VectorStore from langchain_community.vectorstores.utils import ( DistanceStrategy, maximal_marginal_relevance, ) if TYPE_CHECKING: from aerospike_vector_search import Client from aerospike_vector_search.types import Neighbor, VectorDistanceMetric logger = logging.getLogger(__name__) def _import_aerospike() -> Any: try: from aerospike_vector_search import Client except ImportError as e: raise ImportError( "Could not import aerospike_vector_search python package. " "Please install it with `pip install aerospike_vector`." ) from e return Client AVST = TypeVar("AVST", bound="Aerospike") class Aerospike(VectorStore): """`Aerospike` vector store. To use, you should have the ``aerospike_vector_search`` python package installed. """ def __init__( self, client: Client, embedding: Union[Embeddings, Callable], namespace: str, index_name: Optional[str] = None, vector_key: str = "_vector", text_key: str = "_text", id_key: str = "_id", set_name: Optional[str] = None, distance_strategy: Optional[ Union[DistanceStrategy, VectorDistanceMetric] ] = DistanceStrategy.EUCLIDEAN_DISTANCE, ): """Initialize with Aerospike client. Args: client: Aerospike client. embedding: Embeddings object or Callable (deprecated) to embed text. namespace: Namespace to use for storing vectors. This should match index_name: Name of the index previously created in Aerospike. This vector_key: Key to use for vector in metadata. This should match the key used during index creation. text_key: Key to use for text in metadata. id_key: Key to use for id in metadata. set_name: Default set name to use for storing vectors. distance_strategy: Distance strategy to use for similarity search This should match the distance strategy used during index creation. """ aerospike = _import_aerospike() if not isinstance(embedding, Embeddings): warnings.warn( "Passing in `embedding` as a Callable is deprecated. Please pass in an" " Embeddings object instead." ) if not isinstance(client, aerospike): raise ValueError( f"client should be an instance of aerospike_vector_search.Client, " f"got {type(client)}" ) self._client = client self._embedding = embedding self._text_key = text_key self._vector_key = vector_key self._id_key = id_key self._index_name = index_name self._namespace = namespace self._set_name = set_name self._distance_strategy = self.convert_distance_strategy(distance_strategy) @property def embeddings(self) -> Optional[Embeddings]: """Access the query embedding object if available.""" if isinstance(self._embedding, Embeddings): return self._embedding return None def _embed_documents(self, texts: Iterable[str]) -> List[List[float]]: """Embed search docs.""" if isinstance(self._embedding, Embeddings): return self._embedding.embed_documents(list(texts)) return [self._embedding(t) for t in texts] def _embed_query(self, text: str) -> List[float]: """Embed query text.""" if isinstance(self._embedding, Embeddings): return self._embedding.embed_query(text) return self._embedding(text) @staticmethod def convert_distance_strategy( distance_strategy: Union[VectorDistanceMetric, DistanceStrategy], ) -> DistanceStrategy: """ Convert Aerospikes distance strategy to langchains DistanceStrategy enum. 
This is a convenience method to allow users to pass in the same distance metric used to create the index. """ from aerospike_vector_search.types import VectorDistanceMetric if isinstance(distance_strategy, DistanceStrategy): return distance_strategy if distance_strategy == VectorDistanceMetric.COSINE: return DistanceStrategy.COSINE if distance_strategy == VectorDistanceMetric.DOT_PRODUCT: return DistanceStrategy.DOT_PRODUCT if distance_strategy == VectorDistanceMetric.SQUARED_EUCLIDEAN: return DistanceStrategy.EUCLIDEAN_DISTANCE raise ValueError( "Unknown distance strategy, must be cosine, dot_product" ", or euclidean" ) def add_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, set_name: Optional[str] = None, embedding_chunk_size: int = 1000, index_name: Optional[str] = None, wait_for_index: bool = True, **kwargs: Any, ) -> List[str]: """Run more texts through the embeddings and add to the vectorstore. Args: texts: Iterable of strings to add to the vectorstore. metadatas: Optional list of metadatas associated with the texts. ids: Optional list of ids to associate with the texts. set_name: Optional aerospike set name to add the texts to. batch_size: Batch size to use when adding the texts to the vectorstore. embedding_chunk_size: Chunk size to use when embedding the texts. index_name: Optional aerospike index name used for waiting for index completion. If not provided, the default index_name will be used. wait_for_index: If True, wait for the all the texts to be indexed before returning. Requires index_name to be provided. Defaults to True. kwargs: Additional keyword arguments to pass to the client upsert call. Returns: List of ids from adding the texts into the vectorstore. """ if set_name is None: set_name = self._set_name if index_name is None: index_name = self._index_name if wait_for_index and index_name is None: raise ValueError("if wait_for_index is True, index_name must be provided") texts = list(texts) ids = ids or [str(uuid.uuid4()) for _ in texts] # We need to shallow copy so that we can add the vector and text keys if metadatas: metadatas = [m.copy() for m in metadatas] else: metadatas = metadatas or [{} for _ in texts] for i in range(0, len(texts), embedding_chunk_size): chunk_texts = texts[i : i + embedding_chunk_size] chunk_ids = ids[i : i + embedding_chunk_size] chunk_metadatas = metadatas[i : i + embedding_chunk_size] embeddings = self._embed_documents(chunk_texts) for metadata, embedding, text in zip( chunk_metadatas, embeddings, chunk_texts ): metadata[self._vector_key] = embedding metadata[self._text_key] = text for id, metadata in zip(chunk_ids, chunk_metadatas): metadata[self._id_key] = id self._client.upsert( namespace=self._namespace, key=id, set_name=set_name, record_data=metadata, **kwargs, ) if wait_for_index: self._client.wait_for_index_completion( namespace=self._namespace, name=index_name, ) return ids def delete( self, ids: Optional[List[str]] = None, set_name: Optional[str] = None, **kwargs: Any, ) -> Optional[bool]: """Delete by vector ID or other criteria. Args: ids: List of ids to delete. **kwargs: Other keyword arguments to pass to client delete call. Returns: Optional[bool]: True if deletion is successful, False otherwise, None if not implemented. 
""" from aerospike_vector_search import AVSServerError if ids: for id in ids: try: self._client.delete( namespace=self._namespace, key=id, set_name=set_name, **kwargs, ) except AVSServerError: return False return True def similarity_search_with_score( self, query: str, k: int = 4, metadata_keys: Optional[List[str]] = None, index_name: Optional[str] = None, **kwargs: Any, ) -> List[Tuple[Document, float]]: """Return aerospike documents most similar to query, along with scores. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. metadata_keys: List of metadata keys to return with the documents. If None, all metadata keys will be returned. Defaults to None. index_name: Name of the index to search. Overrides the default index_name. kwargs: Additional keyword arguments to pass to the search method. Returns: List of Documents most similar to the query and associated scores. """ return self.similarity_search_by_vector_with_score( self._embed_query(query), k=k, metadata_keys=metadata_keys, index_name=index_name, **kwargs, ) def similarity_search_by_vector_with_score( self, embedding: List[float], k: int = 4, metadata_keys: Optional[List[str]] = None, index_name: Optional[str] = None, **kwargs: Any, ) -> List[Tuple[Document, float]]: """Return aerospike documents most similar to embedding, along with scores. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. metadata_keys: List of metadata keys to return with the documents. If None, all metadata keys will be returned. Defaults to None. index_name: Name of the index to search. Overrides the default index_name. kwargs: Additional keyword arguments to pass to the client vector_search method. Returns: List of Documents most similar to the query and associated scores. """ docs = [] if metadata_keys and self._text_key not in metadata_keys: metadata_keys = [self._text_key] + metadata_keys if index_name is None: index_name = self._index_name if index_name is None: raise ValueError("index_name must be provided") results: list[Neighbor] = self._client.vector_search( index_name=index_name, namespace=self._namespace, query=embedding, limit=k, field_names=metadata_keys, **kwargs, ) for result in results: metadata = result.fields if self._text_key in metadata: text = metadata.pop(self._text_key) score = result.distance docs.append((Document(page_content=text, metadata=metadata), score)) else: logger.warning( f"Found document with no `{self._text_key}` key. Skipping." ) continue return docs def similarity_search_by_vector( self, embedding: List[float], k: int = 4, metadata_keys: Optional[List[str]] = None, index_name: Optional[str] = None, **kwargs: Any, ) -> List[Document]: """Return docs most similar to embedding vector. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. metadata_keys: List of metadata keys to return with the documents. If None, all metadata keys will be returned. Defaults to None. index_name: Name of the index to search. Overrides the default index_name. kwargs: Additional keyword arguments to pass to the search method. Returns: List of Documents most similar to the query vector. 
""" return [ doc for doc, _ in self.similarity_search_by_vector_with_score( embedding, k=k, metadata_keys=metadata_keys, index_name=index_name, **kwargs, ) ] def similarity_search( self, query: str, k: int = 4, metadata_keys: Optional[List[str]] = None, index_name: Optional[str] = None, **kwargs: Any, ) -> List[Document]: """Return aerospike documents most similar to query. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. metadata_keys: List of metadata keys to return with the documents. If None, all metadata keys will be returned. Defaults to None. index_name: Optional name of the index to search. Overrides the default index_name. Returns: List of Documents most similar to the query and score for each """ docs_and_scores = self.similarity_search_with_score( query, k=k, metadata_keys=metadata_keys, index_name=index_name, **kwargs ) return [doc for doc, _ in docs_and_scores] def _select_relevance_score_fn(self) -> Callable[[float], float]: """ The 'correct' relevance function may differ depending on a few things, including: - the distance / similarity metric used by the VectorStore - the scale of your embeddings (OpenAI's are unit normed. Many others are not!) - embedding dimensionality - etc. 0 is dissimilar, 1 is similar. Aerospike's relevance_fn assume euclidean and dot product embeddings are normalized to unit norm. """ if self._distance_strategy == DistanceStrategy.COSINE: return self._cosine_relevance_score_fn elif self._distance_strategy == DistanceStrategy.DOT_PRODUCT: return self._max_inner_product_relevance_score_fn elif self._distance_strategy == DistanceStrategy.EUCLIDEAN_DISTANCE: return self._euclidean_relevance_score_fn else: raise ValueError( "Unknown distance strategy, must be cosine, dot_product" ", or euclidean" ) @staticmethod def _cosine_relevance_score_fn(score: float) -> float: """Aerospike returns cosine distance scores between [0,2] 0 is dissimilar, 1 is similar. """ return 1 - (score / 2) def max_marginal_relevance_search_by_vector( self, embedding: List[float], k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, metadata_keys: Optional[List[str]] = None, index_name: Optional[str] = None, **kwargs: Any, ) -> List[Document]: """Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. fetch_k: Number of Documents to fetch to pass to MMR algorithm. lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. metadata_keys: List of metadata keys to return with the documents. If None, all metadata keys will be returned. Defaults to None. index_name: Optional name of the index to search. Overrides the default index_name. Returns: List of Documents selected by maximal marginal relevance. 
""" if metadata_keys and self._vector_key not in metadata_keys: metadata_keys = [self._vector_key] + metadata_keys docs = self.similarity_search_by_vector( embedding, k=fetch_k, metadata_keys=metadata_keys, index_name=index_name, **kwargs, ) mmr_selected = maximal_marginal_relevance( np.array([embedding], dtype=np.float32), [doc.metadata[self._vector_key] for doc in docs], k=k, lambda_mult=lambda_mult, ) if metadata_keys and self._vector_key in metadata_keys: for i in mmr_selected: docs[i].metadata.pop(self._vector_key) return [docs[i] for i in mmr_selected] def max_marginal_relevance_search( self, query: str, k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, metadata_keys: Optional[List[str]] = None, index_name: Optional[str] = None, **kwargs: Any, ) -> List[Document]: """Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. fetch_k: Number of Documents to fetch to pass to MMR algorithm. lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. index_name: Name of the index to search. Returns: List of Documents selected by maximal marginal relevance. """ embedding = self._embed_query(query) return self.max_marginal_relevance_search_by_vector( embedding, k, fetch_k, lambda_mult, metadata_keys=metadata_keys, index_name=index_name, **kwargs, ) @classmethod def from_texts( cls, texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, client: Client = None, namespace: str = "test", index_name: Optional[str] = None, ids: Optional[List[str]] = None, embeddings_chunk_size: int = 1000, client_kwargs: Optional[dict] = None, **kwargs: Any, ) -> Aerospike: """ This is a user friendly interface that: 1. Embeds text. 2. Converts the texts into documents. 3. Adds the documents to a provided Aerospike index This is intended to be a quick way to get started. Example: .. code-block:: python from langchain_community.vectorstores import Aerospike from langchain_openai import OpenAIEmbeddings from aerospike_vector_search import Client, HostPort client = Client(seeds=HostPort(host="localhost", port=5000)) aerospike = Aerospike.from_texts( ["foo", "bar", "baz"], embedder, client, "namespace", index_name="index", vector_key="vector", distance_strategy=MODEL_DISTANCE_CALC, ) """ aerospike = cls( client, embedding, namespace, **kwargs, ) aerospike.add_texts( texts, metadatas=metadatas, ids=ids, index_name=index_name, embedding_chunk_size=embeddings_chunk_size, **(client_kwargs or {}), ) return aerospike
0
lc_public_repos/langchain/libs/community/langchain_community
lc_public_repos/langchain/libs/community/langchain_community/vectorstores/momento_vector_index.py
import logging from typing import ( TYPE_CHECKING, Any, Iterable, List, Optional, Tuple, Type, TypeVar, cast, ) from uuid import uuid4 import numpy as np from langchain_core.documents import Document from langchain_core.embeddings import Embeddings from langchain_core.utils import get_from_env from langchain_core.vectorstores import VectorStore from langchain_community.vectorstores.utils import ( DistanceStrategy, maximal_marginal_relevance, ) VST = TypeVar("VST", bound="VectorStore") logger = logging.getLogger(__name__) if TYPE_CHECKING: from momento import PreviewVectorIndexClient class MomentoVectorIndex(VectorStore): """`Momento Vector Index` (MVI) vector store. Momento Vector Index is a serverless vector index that can be used to store and search vectors. To use you should have the ``momento`` python package installed. Example: .. code-block:: python from langchain_community.embeddings import OpenAIEmbeddings from langchain_community.vectorstores import MomentoVectorIndex from momento import ( CredentialProvider, PreviewVectorIndexClient, VectorIndexConfigurations, ) vectorstore = MomentoVectorIndex( embedding=OpenAIEmbeddings(), client=PreviewVectorIndexClient( VectorIndexConfigurations.Default.latest(), credential_provider=CredentialProvider.from_environment_variable( "MOMENTO_API_KEY" ), ), index_name="my-index", ) """ def __init__( self, embedding: Embeddings, client: "PreviewVectorIndexClient", index_name: str = "default", distance_strategy: DistanceStrategy = DistanceStrategy.COSINE, text_field: str = "text", ensure_index_exists: bool = True, **kwargs: Any, ): """Initialize a Vector Store backed by Momento Vector Index. Args: embedding (Embeddings): The embedding function to use. configuration (VectorIndexConfiguration): The configuration to initialize the Vector Index with. credential_provider (CredentialProvider): The credential provider to authenticate the Vector Index with. index_name (str, optional): The name of the index to store the documents in. Defaults to "default". distance_strategy (DistanceStrategy, optional): The distance strategy to use. If you select DistanceStrategy.EUCLIDEAN_DISTANCE, Momento uses the squared Euclidean distance. Defaults to DistanceStrategy.COSINE. text_field (str, optional): The name of the metadata field to store the original text in. Defaults to "text". ensure_index_exists (bool, optional): Whether to ensure that the index exists before adding documents to it. Defaults to True. """ try: from momento import PreviewVectorIndexClient except ImportError: raise ImportError( "Could not import momento python package. " "Please install it with `pip install momento`." 
)

        self._client: PreviewVectorIndexClient = client
        self._embedding = embedding
        self.index_name = index_name
        self.__validate_distance_strategy(distance_strategy)
        self.distance_strategy = distance_strategy
        self.text_field = text_field
        self._ensure_index_exists = ensure_index_exists

    @staticmethod
    def __validate_distance_strategy(distance_strategy: DistanceStrategy) -> None:
        # Cosine, inner product, and (squared) Euclidean are the strategies
        # handled by _create_index_if_not_exists below.
        if distance_strategy not in [
            DistanceStrategy.COSINE,
            DistanceStrategy.MAX_INNER_PRODUCT,
            DistanceStrategy.EUCLIDEAN_DISTANCE,
        ]:
            raise ValueError(f"Distance strategy {distance_strategy} not implemented.")

    @property
    def embeddings(self) -> Embeddings:
        return self._embedding

    def _create_index_if_not_exists(self, num_dimensions: int) -> bool:
        """Create index if it does not exist."""
        from momento.requests.vector_index import SimilarityMetric
        from momento.responses.vector_index import CreateIndex

        similarity_metric = None
        if self.distance_strategy == DistanceStrategy.COSINE:
            similarity_metric = SimilarityMetric.COSINE_SIMILARITY
        elif self.distance_strategy == DistanceStrategy.MAX_INNER_PRODUCT:
            similarity_metric = SimilarityMetric.INNER_PRODUCT
        elif self.distance_strategy == DistanceStrategy.EUCLIDEAN_DISTANCE:
            similarity_metric = SimilarityMetric.EUCLIDEAN_SIMILARITY
        else:
            logger.error(
                f"Distance strategy {self.distance_strategy} not implemented."
            )
            raise ValueError(
                f"Distance strategy {self.distance_strategy} not implemented."
            )

        response = self._client.create_index(
            self.index_name, num_dimensions, similarity_metric
        )
        if isinstance(response, CreateIndex.Success):
            return True
        elif isinstance(response, CreateIndex.IndexAlreadyExists):
            return False
        elif isinstance(response, CreateIndex.Error):
            logger.error(f"Error creating index: {response.inner_exception}")
            raise response.inner_exception
        else:
            logger.error(f"Unexpected response: {response}")
            raise Exception(f"Unexpected response: {response}")

    def add_texts(
        self,
        texts: Iterable[str],
        metadatas: Optional[List[dict]] = None,
        **kwargs: Any,
    ) -> List[str]:
        """Run more texts through the embeddings and add to the vectorstore.

        Args:
            texts (Iterable[str]): Iterable of strings to add to the vectorstore.
            metadatas (Optional[List[dict]]): Optional list of metadatas
                associated with the texts.
            kwargs (Any): Other optional parameters. Specifically:
            - ids (List[str], optional): List of ids to use for the texts.
                Defaults to None, in which case uuids are generated.

        Returns:
            List[str]: List of ids from adding the texts into the vectorstore.
        """
        from momento.requests.vector_index import Item
        from momento.responses.vector_index import UpsertItemBatch

        texts = list(texts)

        if len(texts) == 0:
            return []

        if metadatas is not None:
            for metadata, text in zip(metadatas, texts):
                metadata[self.text_field] = text
        else:
            metadatas = [{self.text_field: text} for text in texts]

        try:
            embeddings = self._embedding.embed_documents(texts)
        except NotImplementedError:
            embeddings = [self._embedding.embed_query(x) for x in texts]

        # Create index if it does not exist.
        # We assume that if it does exist, then it was created with the desired
        # number of dimensions and similarity metric.
if self._ensure_index_exists: self._create_index_if_not_exists(len(embeddings[0])) if "ids" in kwargs: ids = kwargs["ids"] if len(ids) != len(embeddings): raise ValueError("Number of ids must match number of texts") else: ids = [str(uuid4()) for _ in range(len(embeddings))] batch_size = 128 for i in range(0, len(embeddings), batch_size): start = i end = min(i + batch_size, len(embeddings)) items = [ Item(id=id, vector=vector, metadata=metadata) for id, vector, metadata in zip( ids[start:end], embeddings[start:end], metadatas[start:end], ) ] response = self._client.upsert_item_batch(self.index_name, items) if isinstance(response, UpsertItemBatch.Success): pass elif isinstance(response, UpsertItemBatch.Error): raise response.inner_exception else: raise Exception(f"Unexpected response: {response}") return ids def delete(self, ids: Optional[List[str]] = None, **kwargs: Any) -> Optional[bool]: """Delete by vector ID. Args: ids (List[str]): List of ids to delete. kwargs (Any): Other optional parameters (unused) Returns: Optional[bool]: True if deletion is successful, False otherwise, None if not implemented. """ from momento.responses.vector_index import DeleteItemBatch if ids is None: return True response = self._client.delete_item_batch(self.index_name, ids) return isinstance(response, DeleteItemBatch.Success) def similarity_search( self, query: str, k: int = 4, **kwargs: Any ) -> List[Document]: """Search for similar documents to the query string. Args: query (str): The query string to search for. k (int, optional): The number of results to return. Defaults to 4. Returns: List[Document]: A list of documents that are similar to the query. """ res = self.similarity_search_with_score(query=query, k=k, **kwargs) return [doc for doc, _ in res] def similarity_search_with_score( self, query: str, k: int = 4, **kwargs: Any, ) -> List[Tuple[Document, float]]: """Search for similar documents to the query string. Args: query (str): The query string to search for. k (int, optional): The number of results to return. Defaults to 4. kwargs (Any): Vector Store specific search parameters. The following are forwarded to the Momento Vector Index: - top_k (int, optional): The number of results to return. Returns: List[Tuple[Document, float]]: A list of tuples of the form (Document, score). """ embedding = self._embedding.embed_query(query) results = self.similarity_search_with_score_by_vector( embedding=embedding, k=k, **kwargs ) return results def similarity_search_with_score_by_vector( self, embedding: List[float], k: int = 4, **kwargs: Any, ) -> List[Tuple[Document, float]]: """Search for similar documents to the query vector. Args: embedding (List[float]): The query vector to search for. k (int, optional): The number of results to return. Defaults to 4. kwargs (Any): Vector Store specific search parameters. The following are forwarded to the Momento Vector Index: - top_k (int, optional): The number of results to return. Returns: List[Tuple[Document, float]]: A list of tuples of the form (Document, score). 
""" from momento.requests.vector_index import ALL_METADATA from momento.responses.vector_index import Search if "top_k" in kwargs: k = kwargs["k"] filter_expression = kwargs.get("filter_expression", None) response = self._client.search( self.index_name, embedding, top_k=k, metadata_fields=ALL_METADATA, filter_expression=filter_expression, ) if not isinstance(response, Search.Success): return [] results = [] for hit in response.hits: text = cast(str, hit.metadata.pop(self.text_field)) doc = Document(page_content=text, metadata=hit.metadata) pair = (doc, hit.score) results.append(pair) return results def similarity_search_by_vector( self, embedding: List[float], k: int = 4, **kwargs: Any ) -> List[Document]: """Search for similar documents to the query vector. Args: embedding (List[float]): The query vector to search for. k (int, optional): The number of results to return. Defaults to 4. Returns: List[Document]: A list of documents that are similar to the query. """ results = self.similarity_search_with_score_by_vector( embedding=embedding, k=k, **kwargs ) return [doc for doc, _ in results] def max_marginal_relevance_search_by_vector( self, embedding: List[float], k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, **kwargs: Any, ) -> List[Document]: """Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. fetch_k: Number of Documents to fetch to pass to MMR algorithm. lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. Returns: List of Documents selected by maximal marginal relevance. """ from momento.requests.vector_index import ALL_METADATA from momento.responses.vector_index import SearchAndFetchVectors filter_expression = kwargs.get("filter_expression", None) response = self._client.search_and_fetch_vectors( self.index_name, embedding, top_k=fetch_k, metadata_fields=ALL_METADATA, filter_expression=filter_expression, ) if isinstance(response, SearchAndFetchVectors.Success): pass elif isinstance(response, SearchAndFetchVectors.Error): logger.error(f"Error searching and fetching vectors: {response}") return [] else: logger.error(f"Unexpected response: {response}") raise Exception(f"Unexpected response: {response}") mmr_selected = maximal_marginal_relevance( query_embedding=np.array([embedding], dtype=np.float32), embedding_list=[hit.vector for hit in response.hits], lambda_mult=lambda_mult, k=k, ) selected = [response.hits[i].metadata for i in mmr_selected] return [ Document(page_content=metadata.pop(self.text_field, ""), metadata=metadata) # type: ignore for metadata in selected ] def max_marginal_relevance_search( self, query: str, k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, **kwargs: Any, ) -> List[Document]: """Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. fetch_k: Number of Documents to fetch to pass to MMR algorithm. lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. 
Returns: List of Documents selected by maximal marginal relevance. """ embedding = self._embedding.embed_query(query) return self.max_marginal_relevance_search_by_vector( embedding, k, fetch_k, lambda_mult, **kwargs ) @classmethod def from_texts( cls: Type[VST], texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, **kwargs: Any, ) -> VST: """Return the Vector Store initialized from texts and embeddings. Args: cls (Type[VST]): The Vector Store class to use to initialize the Vector Store. texts (List[str]): The texts to initialize the Vector Store with. embedding (Embeddings): The embedding function to use. metadatas (Optional[List[dict]], optional): The metadata associated with the texts. Defaults to None. kwargs (Any): Vector Store specific parameters. The following are forwarded to the Vector Store constructor and required: - index_name (str, optional): The name of the index to store the documents in. Defaults to "default". - text_field (str, optional): The name of the metadata field to store the original text in. Defaults to "text". - distance_strategy (DistanceStrategy, optional): The distance strategy to use. Defaults to DistanceStrategy.COSINE. If you select DistanceStrategy.EUCLIDEAN_DISTANCE, Momento uses the squared Euclidean distance. - ensure_index_exists (bool, optional): Whether to ensure that the index exists before adding documents to it. Defaults to True. Additionally you can either pass in a client or an API key - client (PreviewVectorIndexClient): The Momento Vector Index client to use. - api_key (Optional[str]): The configuration to use to initialize the Vector Index with. Defaults to None. If None, the configuration is initialized from the environment variable `MOMENTO_API_KEY`. Returns: VST: Momento Vector Index vector store initialized from texts and embeddings. """ from momento import ( CredentialProvider, PreviewVectorIndexClient, VectorIndexConfigurations, ) if "client" in kwargs: client = kwargs.pop("client") else: supplied_api_key = kwargs.pop("api_key", None) api_key = supplied_api_key or get_from_env("api_key", "MOMENTO_API_KEY") client = PreviewVectorIndexClient( configuration=VectorIndexConfigurations.Default.latest(), credential_provider=CredentialProvider.from_string(api_key), ) vector_db = cls(embedding=embedding, client=client, **kwargs) # type: ignore vector_db.add_texts(texts=texts, metadatas=metadatas, **kwargs) return vector_db
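

# A minimal end-to-end usage sketch (illustrative only, not part of the class
# above). It assumes a valid Momento API key is set in the MOMENTO_API_KEY
# environment variable and that the vector store class defined above is named
# ``MomentoVectorIndex``; ``FakeEmbeddings`` is a test helper that returns
# random vectors, used here so the sketch has no model dependency.
if __name__ == "__main__":
    from langchain_community.embeddings import FakeEmbeddings

    vector_store = MomentoVectorIndex.from_texts(
        texts=["hello world", "goodbye world"],
        embedding=FakeEmbeddings(size=1536),
        index_name="langchain-example",
    )
    for doc, score in vector_store.similarity_search_with_score("hello", k=1):
        print(doc.page_content, score)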
0
lc_public_repos/langchain/libs/community/langchain_community
lc_public_repos/langchain/libs/community/langchain_community/vectorstores/couchbase.py
from __future__ import annotations import uuid from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Tuple, Type from langchain_core._api.deprecation import deprecated from langchain_core.documents import Document from langchain_core.embeddings import Embeddings from langchain_core.vectorstores import VectorStore if TYPE_CHECKING: from couchbase.cluster import Cluster @deprecated( since="0.2.4", removal="1.0", alternative_import="langchain_couchbase.CouchbaseVectorStore", ) class CouchbaseVectorStore(VectorStore): """`Couchbase Vector Store` vector store. To use it, you need - a recent installation of the `couchbase` library - a Couchbase database with a pre-defined Search index with support for vector fields Example: .. code-block:: python from langchain_community.vectorstores import CouchbaseVectorStore from langchain_openai import OpenAIEmbeddings from couchbase.cluster import Cluster from couchbase.auth import PasswordAuthenticator from couchbase.options import ClusterOptions from datetime import timedelta auth = PasswordAuthenticator(username, password) options = ClusterOptions(auth) connect_string = "couchbases://localhost" cluster = Cluster(connect_string, options) # Wait until the cluster is ready for use. cluster.wait_until_ready(timedelta(seconds=5)) embeddings = OpenAIEmbeddings() vectorstore = CouchbaseVectorStore( cluster=cluster, bucket_name="", scope_name="", collection_name="", embedding=embeddings, index_name="vector-index", ) vectorstore.add_texts(["hello", "world"]) results = vectorstore.similarity_search("ola", k=1) """ # Default batch size DEFAULT_BATCH_SIZE: int = 100 _metadata_key: str = "metadata" _default_text_key: str = "text" _default_embedding_key: str = "embedding" def _check_bucket_exists(self) -> bool: """Check if the bucket exists in the linked Couchbase cluster""" bucket_manager = self._cluster.buckets() try: bucket_manager.get_bucket(self._bucket_name) return True except Exception: return False def _check_scope_and_collection_exists(self) -> bool: """Check if the scope and collection exists in the linked Couchbase bucket Raises a ValueError if either is not found""" scope_collection_map: Dict[str, Any] = {} # Get a list of all scopes in the bucket for scope in self._bucket.collections().get_all_scopes(): scope_collection_map[scope.name] = [] # Get a list of all the collections in the scope for collection in scope.collections: scope_collection_map[scope.name].append(collection.name) # Check if the scope exists if self._scope_name not in scope_collection_map.keys(): raise ValueError( f"Scope {self._scope_name} not found in Couchbase " f"bucket {self._bucket_name}" ) # Check if the collection exists in the scope if self._collection_name not in scope_collection_map[self._scope_name]: raise ValueError( f"Collection {self._collection_name} not found in scope " f"{self._scope_name} in Couchbase bucket {self._bucket_name}" ) return True def _check_index_exists(self) -> bool: """Check if the Search index exists in the linked Couchbase cluster Raises a ValueError if the index does not exist""" if self._scoped_index: all_indexes = [ index.name for index in self._scope.search_indexes().get_all_indexes() ] if self._index_name not in all_indexes: raise ValueError( f"Index {self._index_name} does not exist. " " Please create the index before searching." ) else: all_indexes = [ index.name for index in self._cluster.search_indexes().get_all_indexes() ] if self._index_name not in all_indexes: raise ValueError( f"Index {self._index_name} does not exist. 
" " Please create the index before searching." ) return True def __init__( self, cluster: Cluster, bucket_name: str, scope_name: str, collection_name: str, embedding: Embeddings, index_name: str, *, text_key: Optional[str] = _default_text_key, embedding_key: Optional[str] = _default_embedding_key, scoped_index: bool = True, ) -> None: """ Initialize the Couchbase Vector Store. Args: cluster (Cluster): couchbase cluster object with active connection. bucket_name (str): name of bucket to store documents in. scope_name (str): name of scope in the bucket to store documents in. collection_name (str): name of collection in the scope to store documents in embedding (Embeddings): embedding function to use. index_name (str): name of the Search index to use. text_key (optional[str]): key in document to use as text. Set to text by default. embedding_key (optional[str]): key in document to use for the embeddings. Set to embedding by default. scoped_index (optional[bool]): specify whether the index is a scoped index. Set to True by default. """ try: from couchbase.cluster import Cluster except ImportError as e: raise ImportError( "Could not import couchbase python package. " "Please install couchbase SDK with `pip install couchbase`." ) from e if not isinstance(cluster, Cluster): raise ValueError( f"cluster should be an instance of couchbase.Cluster, " f"got {type(cluster)}" ) self._cluster = cluster if not embedding: raise ValueError("Embeddings instance must be provided.") if not bucket_name: raise ValueError("bucket_name must be provided.") if not scope_name: raise ValueError("scope_name must be provided.") if not collection_name: raise ValueError("collection_name must be provided.") if not index_name: raise ValueError("index_name must be provided.") self._bucket_name = bucket_name self._scope_name = scope_name self._collection_name = collection_name self._embedding_function = embedding self._text_key = text_key self._embedding_key = embedding_key self._index_name = index_name self._scoped_index = scoped_index # Check if the bucket exists if not self._check_bucket_exists(): raise ValueError( f"Bucket {self._bucket_name} does not exist. " " Please create the bucket before searching." ) try: self._bucket = self._cluster.bucket(self._bucket_name) self._scope = self._bucket.scope(self._scope_name) self._collection = self._scope.collection(self._collection_name) except Exception as e: raise ValueError( "Error connecting to couchbase. " "Please check the connection and credentials." ) from e # Check if the scope and collection exists. Throws ValueError if they don't try: self._check_scope_and_collection_exists() except Exception as e: raise e # Check if the index exists. Throws ValueError if it doesn't try: self._check_index_exists() except Exception as e: raise e def add_texts( self, texts: Iterable[str], metadatas: Optional[List[Dict[str, Any]]] = None, ids: Optional[List[str]] = None, batch_size: Optional[int] = None, **kwargs: Any, ) -> List[str]: """Run texts through the embeddings and persist in vectorstore. If the document IDs are passed, the existing documents (if any) will be overwritten with the new ones. Args: texts (Iterable[str]): Iterable of strings to add to the vectorstore. metadatas (Optional[List[Dict]]): Optional list of metadatas associated with the texts. ids (Optional[List[str]]): Optional list of ids associated with the texts. IDs have to be unique strings across the collection. If it is not specified uuids are generated and used as ids. 
            batch_size (Optional[int]): Optional batch size for bulk insertions.
                Default is 100.

        Returns:
            List[str]: List of ids from adding the texts into the vectorstore.
        """
        from couchbase.exceptions import DocumentExistsException

        if not batch_size:
            batch_size = self.DEFAULT_BATCH_SIZE
        doc_ids: List[str] = []

        if ids is None:
            ids = [uuid.uuid4().hex for _ in texts]

        if metadatas is None:
            metadatas = [{} for _ in texts]

        embedded_texts = self._embedding_function.embed_documents(list(texts))

        documents_to_insert = {
            id: {
                self._text_key: text,
                self._embedding_key: vector,
                self._metadata_key: metadata,
            }
            for id, text, vector, metadata in zip(
                ids, texts, embedded_texts, metadatas
            )
        }

        # Insert in batches of `batch_size` documents
        doc_items = list(documents_to_insert.items())
        for i in range(0, len(doc_items), batch_size):
            batch = dict(doc_items[i : i + batch_size])
            try:
                result = self._collection.upsert_multi(batch)
                if result.all_ok:
                    doc_ids.extend(batch.keys())
            except DocumentExistsException as e:
                raise ValueError(f"Document already exists: {e}")

        return doc_ids

    def delete(self, ids: Optional[List[str]] = None, **kwargs: Any) -> Optional[bool]:
        """Delete documents from the vector store by ids.

        Args:
            ids (List[str]): List of IDs of the documents to delete.
            batch_size (Optional[int]): Optional batch size for bulk deletions.

        Returns:
            bool: True if all the documents were deleted successfully, False otherwise.
        """
        from couchbase.exceptions import DocumentNotFoundException

        if ids is None:
            raise ValueError("No document ids provided to delete.")

        batch_size = kwargs.get("batch_size", self.DEFAULT_BATCH_SIZE)
        deletion_status = True

        # Delete in batches
        for i in range(0, len(ids), batch_size):
            batch = ids[i : i + batch_size]
            try:
                result = self._collection.remove_multi(batch)
            except DocumentNotFoundException as e:
                deletion_status = False
                raise ValueError(f"Document not found: {e}")

            deletion_status &= result.all_ok

        return deletion_status

    @property
    def embeddings(self) -> Embeddings:
        """Return the query embedding object."""
        return self._embedding_function

    def _format_metadata(self, row_fields: Dict[str, Any]) -> Dict[str, Any]:
        """Helper method to format the metadata from the Couchbase Search API.

        Args:
            row_fields (Dict[str, Any]): The fields to format.

        Returns:
            Dict[str, Any]: The formatted metadata.
        """
        metadata = {}
        for key, value in row_fields.items():
            # Couchbase Search returns the metadata key with a prefix
            # `metadata.` We remove it to get the original metadata key
            if key.startswith(self._metadata_key):
                new_key = key.split(self._metadata_key + ".")[-1]
                metadata[new_key] = value
            else:
                metadata[key] = value
        return metadata

    def similarity_search_with_score_by_vector(
        self,
        embedding: List[float],
        k: int = 4,
        search_options: Optional[Dict[str, Any]] = {},
        **kwargs: Any,
    ) -> List[Tuple[Document, float]]:
        """Return docs most similar to embedding vector with their scores.

        Args:
            embedding (List[float]): Embedding vector to look up documents similar to.
            k (int): Number of Documents to return.
                Defaults to 4.
            search_options (Optional[Dict[str, Any]]): Optional search options that are
                passed to Couchbase search.
                Defaults to empty dictionary.
            fields (Optional[List[str]]): Optional list of fields to include in the
                metadata of results. Note that these need to be stored in the index.
                If nothing is specified, defaults to all the fields stored in the index.

        Returns:
            List of (Document, score) that are the most similar to the query vector.
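
        Example:
            .. code-block:: python

                # A minimal sketch; it assumes ``vector_store`` is an already
                # initialized CouchbaseVectorStore, ``embeddings`` is the
                # embedding object it was constructed with, and
                # ``metadata.source`` is a hypothetical field stored in the
                # search index.
                query_vector = embeddings.embed_query("hello")
                results = vector_store.similarity_search_with_score_by_vector(
                    query_vector, k=2, fields=["metadata.source"]
                )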
""" import couchbase.search as search from couchbase.options import SearchOptions from couchbase.vector_search import VectorQuery, VectorSearch fields = kwargs.get("fields", ["*"]) # Document text field needs to be returned from the search if fields != ["*"] and self._text_key not in fields: fields.append(self._text_key) search_req = search.SearchRequest.create( VectorSearch.from_vector_query( VectorQuery( self._embedding_key, embedding, k, ) ) ) try: if self._scoped_index: search_iter = self._scope.search( self._index_name, search_req, SearchOptions( limit=k, fields=fields, raw=search_options, ), ) else: search_iter = self._cluster.search( index=self._index_name, request=search_req, options=SearchOptions(limit=k, fields=fields, raw=search_options), ) docs_with_score = [] # Parse the results for row in search_iter.rows(): text = row.fields.pop(self._text_key, "") # Format the metadata from Couchbase metadata = self._format_metadata(row.fields) score = row.score doc = Document(page_content=text, metadata=metadata) docs_with_score.append((doc, score)) except Exception as e: raise ValueError(f"Search failed with error: {e}") return docs_with_score def similarity_search( self, query: str, k: int = 4, search_options: Optional[Dict[str, Any]] = {}, **kwargs: Any, ) -> List[Document]: """Return documents most similar to embedding vector with their scores. Args: query (str): Query to look up for similar documents k (int): Number of Documents to return. Defaults to 4. search_options (Optional[Dict[str, Any]]): Optional search options that are passed to Couchbase search. Defaults to empty dictionary fields (Optional[List[str]]): Optional list of fields to include in the metadata of results. Note that these need to be stored in the index. If nothing is specified, defaults to all the fields stored in the index. Returns: List of Documents most similar to the query. """ query_embedding = self.embeddings.embed_query(query) docs_with_scores = self.similarity_search_with_score_by_vector( query_embedding, k, search_options, **kwargs ) return [doc for doc, _ in docs_with_scores] def similarity_search_with_score( self, query: str, k: int = 4, search_options: Optional[Dict[str, Any]] = {}, **kwargs: Any, ) -> List[Tuple[Document, float]]: """Return documents that are most similar to the query with their scores. Args: query (str): Query to look up for similar documents k (int): Number of Documents to return. Defaults to 4. search_options (Optional[Dict[str, Any]]): Optional search options that are passed to Couchbase search. Defaults to empty dictionary. fields (Optional[List[str]]): Optional list of fields to include in the metadata of results. Note that these need to be stored in the index. If nothing is specified, defaults to text and metadata fields. Returns: List of (Document, score) that are most similar to the query. """ query_embedding = self.embeddings.embed_query(query) docs_with_score = self.similarity_search_with_score_by_vector( query_embedding, k, search_options, **kwargs ) return docs_with_score def similarity_search_by_vector( self, embedding: List[float], k: int = 4, search_options: Optional[Dict[str, Any]] = {}, **kwargs: Any, ) -> List[Document]: """Return documents that are most similar to the vector embedding. Args: embedding (List[float]): Embedding to look up documents similar to. k (int): Number of Documents to return. Defaults to 4. search_options (Optional[Dict[str, Any]]): Optional search options that are passed to Couchbase search. Defaults to empty dictionary. 
fields (Optional[List[str]]): Optional list of fields to include in the metadata of results. Note that these need to be stored in the index. If nothing is specified, defaults to document text and metadata fields. Returns: List of Documents most similar to the query. """ docs_with_score = self.similarity_search_with_score_by_vector( embedding, k, search_options, **kwargs ) return [doc for doc, _ in docs_with_score] @classmethod def _from_kwargs( cls: Type[CouchbaseVectorStore], embedding: Embeddings, **kwargs: Any, ) -> CouchbaseVectorStore: """Initialize the Couchbase vector store from keyword arguments for the vector store. Args: embedding: Embedding object to use to embed text. **kwargs: Keyword arguments to initialize the vector store with. Accepted arguments are: - cluster - bucket_name - scope_name - collection_name - index_name - text_key - embedding_key - scoped_index """ cluster = kwargs.get("cluster", None) bucket_name = kwargs.get("bucket_name", None) scope_name = kwargs.get("scope_name", None) collection_name = kwargs.get("collection_name", None) index_name = kwargs.get("index_name", None) text_key = kwargs.get("text_key", cls._default_text_key) embedding_key = kwargs.get("embedding_key", cls._default_embedding_key) scoped_index = kwargs.get("scoped_index", True) return cls( embedding=embedding, cluster=cluster, bucket_name=bucket_name, scope_name=scope_name, collection_name=collection_name, index_name=index_name, text_key=text_key, embedding_key=embedding_key, scoped_index=scoped_index, ) @classmethod def from_texts( cls: Type[CouchbaseVectorStore], texts: List[str], embedding: Embeddings, metadatas: Optional[List[Dict[Any, Any]]] = None, **kwargs: Any, ) -> CouchbaseVectorStore: """Construct a Couchbase vector store from a list of texts. Example: .. code-block:: python from langchain_community.vectorstores import CouchbaseVectorStore from langchain_openai import OpenAIEmbeddings from couchbase.cluster import Cluster from couchbase.auth import PasswordAuthenticator from couchbase.options import ClusterOptions from datetime import timedelta auth = PasswordAuthenticator(username, password) options = ClusterOptions(auth) connect_string = "couchbases://localhost" cluster = Cluster(connect_string, options) # Wait until the cluster is ready for use. cluster.wait_until_ready(timedelta(seconds=5)) embeddings = OpenAIEmbeddings() texts = ["hello", "world"] vectorstore = CouchbaseVectorStore.from_texts( texts, embedding=embeddings, cluster=cluster, bucket_name="", scope_name="", collection_name="", index_name="vector-index", ) Args: texts (List[str]): list of texts to add to the vector store. embedding (Embeddings): embedding function to use. metadatas (optional[List[Dict]): list of metadatas to add to documents. **kwargs: Keyword arguments used to initialize the vector store with and/or passed to `add_texts` method. Check the constructor and/or `add_texts` for the list of accepted arguments. Returns: A Couchbase vector store. """ vector_store = cls._from_kwargs(embedding, **kwargs) batch_size = kwargs.get("batch_size", vector_store.DEFAULT_BATCH_SIZE) ids = kwargs.get("ids", None) vector_store.add_texts( texts, metadatas=metadatas, ids=ids, batch_size=batch_size ) return vector_store
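

# An illustrative round-trip sketch (not part of the class above). It assumes
# a reachable Couchbase cluster plus an existing bucket, scope, collection,
# and search index, with the hypothetical names below; FakeEmbeddings is a
# test helper that returns random vectors.
if __name__ == "__main__":
    from datetime import timedelta

    from couchbase.auth import PasswordAuthenticator
    from couchbase.cluster import Cluster
    from couchbase.options import ClusterOptions

    from langchain_community.embeddings import FakeEmbeddings

    cluster = Cluster(
        "couchbase://localhost",
        ClusterOptions(PasswordAuthenticator("Administrator", "password")),
    )
    cluster.wait_until_ready(timedelta(seconds=5))

    store = CouchbaseVectorStore(
        cluster=cluster,
        bucket_name="my-bucket",
        scope_name="my-scope",
        collection_name="my-collection",
        embedding=FakeEmbeddings(size=1536),
        index_name="vector-index",
    )
    ids = store.add_texts(["hello", "world"])
    print(store.similarity_search_with_score("hello", k=1))
    store.delete(ids=ids)  # remove the documents inserted above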
0
lc_public_repos/langchain/libs/community/langchain_community
lc_public_repos/langchain/libs/community/langchain_community/vectorstores/bageldb.py
from langchain_community.vectorstores.bagel import Bagel __all__ = ["Bagel"]
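
# This module simply re-exports ``Bagel`` from
# ``langchain_community.vectorstores.bagel``, so both import paths resolve to
# the same class:
#
#     from langchain_community.vectorstores.bageldb import Bagel
#     from langchain_community.vectorstores.bagel import Bagel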
0
lc_public_repos/langchain/libs/community/langchain_community
lc_public_repos/langchain/libs/community/langchain_community/vectorstores/dashvector.py
from __future__ import annotations

import logging
import uuid
from typing import (
    Any,
    Iterable,
    List,
    Optional,
    Tuple,
)

import numpy as np
from langchain_core.documents import Document
from langchain_core.embeddings import Embeddings
from langchain_core.utils import get_from_env
from langchain_core.vectorstores import VectorStore

from langchain_community.vectorstores.utils import maximal_marginal_relevance

logger = logging.getLogger(__name__)


class DashVector(VectorStore):
    """`DashVector` vector store.

    To use, you should have the ``dashvector`` python package installed.

    Example:
        .. code-block:: python

            from langchain_community.vectorstores import DashVector
            from langchain_community.embeddings.openai import OpenAIEmbeddings
            import dashvector

            client = dashvector.Client(api_key="***")
            client.create("langchain", dimension=1024)
            collection = client.get("langchain")
            embeddings = OpenAIEmbeddings()
            vectorstore = DashVector(collection, embeddings, "text")
    """

    def __init__(
        self,
        collection: Any,
        embedding: Embeddings,
        text_field: str,
    ):
        """Initialize with DashVector collection."""

        try:
            import dashvector
        except ImportError:
            raise ImportError(
                "Could not import dashvector python package. "
                "Please install it with `pip install dashvector`."
            )

        if not isinstance(collection, dashvector.Collection):
            raise ValueError(
                f"collection should be an instance of dashvector.Collection, "
                f"but got {type(collection)}"
            )
        self._collection = collection
        self._embedding = embedding
        self._text_field = text_field

    def _create_partition_if_not_exists(self, partition: str) -> None:
        """Create a Partition in current Collection."""
        self._collection.create_partition(partition)

    def _similarity_search_with_score_by_vector(
        self,
        embedding: List[float],
        k: int = 4,
        filter: Optional[str] = None,
        partition: str = "default",
    ) -> List[Tuple[Document, float]]:
        """Return docs most similar to query vector, along with scores."""

        # query by vector
        ret = self._collection.query(
            embedding, topk=k, filter=filter, partition=partition
        )
        if not ret:
            raise ValueError(
                f"Fail to query docs by vector, error {self._collection.message}"
            )

        docs = []
        for doc in ret:
            metadata = doc.fields
            text = metadata.pop(self._text_field)
            score = doc.score
            docs.append((Document(page_content=text, metadata=metadata), score))
        return docs

    def add_texts(
        self,
        texts: Iterable[str],
        metadatas: Optional[List[dict]] = None,
        ids: Optional[List[str]] = None,
        batch_size: int = 25,
        partition: str = "default",
        **kwargs: Any,
    ) -> List[str]:
        """Run more texts through the embeddings and add to the vectorstore.

        Args:
            texts: Iterable of strings to add to the vectorstore.
            metadatas: Optional list of metadatas associated with the texts.
            ids: Optional list of ids associated with the texts.
            batch_size: Optional batch size to upsert docs.
            partition: a partition name in collection. [optional].
            kwargs: vectorstore specific parameters

        Returns:
            List of ids from adding the texts into the vectorstore.
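
        Example:
            .. code-block:: python

                # A minimal sketch; it assumes ``vectorstore`` was created as
                # in the class-level example above.
                ids = vectorstore.add_texts(
                    ["doc one", "doc two"],
                    metadatas=[{"source": "a"}, {"source": "b"}],
                    partition="default",
                )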
""" self._create_partition_if_not_exists(partition) ids = ids or [str(uuid.uuid4().hex) for _ in texts] text_list = list(texts) for i in range(0, len(text_list), batch_size): # batch end end = min(i + batch_size, len(text_list)) batch_texts = text_list[i:end] batch_ids = ids[i:end] batch_embeddings = self._embedding.embed_documents(list(batch_texts)) # batch metadatas if metadatas: batch_metadatas = metadatas[i:end] else: batch_metadatas = [{} for _ in range(i, end)] for metadata, text in zip(batch_metadatas, batch_texts): metadata[self._text_field] = text # batch upsert to collection docs = list(zip(batch_ids, batch_embeddings, batch_metadatas)) ret = self._collection.upsert(docs, partition=partition) if not ret: raise ValueError( f"Fail to upsert docs to dashvector vector database," f"Error: {ret.message}" ) return ids def delete( self, ids: Optional[List[str]] = None, partition: str = "default", **kwargs: Any ) -> bool: """Delete by vector ID. Args: ids: List of ids to delete. partition: a partition name in collection. [optional]. Returns: True if deletion is successful, False otherwise. """ return bool(self._collection.delete(ids, partition=partition)) def similarity_search( self, query: str, k: int = 4, filter: Optional[str] = None, partition: str = "default", **kwargs: Any, ) -> List[Document]: """Return docs most similar to query. Args: query: Text to search documents similar to. k: Number of documents to return. Default to 4. filter: Doc fields filter conditions that meet the SQL where clause specification. partition: a partition name in collection. [optional]. Returns: List of Documents most similar to the query text. """ docs_and_scores = self.similarity_search_with_relevance_scores( query, k, filter, partition ) return [doc for doc, _ in docs_and_scores] def similarity_search_with_relevance_scores( self, query: str, k: int = 4, filter: Optional[str] = None, partition: str = "default", **kwargs: Any, ) -> List[Tuple[Document, float]]: """Return docs most similar to query text , alone with relevance scores. Less is more similar, more is more dissimilar. Args: query: input text k: Number of Documents to return. Defaults to 4. filter: Doc fields filter conditions that meet the SQL where clause specification. partition: a partition name in collection. [optional]. Returns: List of Tuples of (doc, similarity_score) """ embedding = self._embedding.embed_query(query) return self._similarity_search_with_score_by_vector( embedding, k=k, filter=filter, partition=partition ) def similarity_search_by_vector( self, embedding: List[float], k: int = 4, filter: Optional[str] = None, partition: str = "default", **kwargs: Any, ) -> List[Document]: """Return docs most similar to embedding vector. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. filter: Doc fields filter conditions that meet the SQL where clause specification. partition: a partition name in collection. [optional]. Returns: List of Documents most similar to the query vector. """ docs_and_scores = self._similarity_search_with_score_by_vector( embedding, k, filter, partition ) return [doc for doc, _ in docs_and_scores] def max_marginal_relevance_search( self, query: str, k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, filter: Optional[dict] = None, partition: str = "default", **kwargs: Any, ) -> List[Document]: """Return docs selected using the maximal marginal relevance. 
Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. fetch_k: Number of Documents to fetch to pass to MMR algorithm. lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. filter: Doc fields filter conditions that meet the SQL where clause specification. partition: a partition name in collection. [optional]. Returns: List of Documents selected by maximal marginal relevance. """ embedding = self._embedding.embed_query(query) return self.max_marginal_relevance_search_by_vector( embedding, k, fetch_k, lambda_mult, filter, partition ) def max_marginal_relevance_search_by_vector( self, embedding: List[float], k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, filter: Optional[dict] = None, partition: str = "default", **kwargs: Any, ) -> List[Document]: """Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. fetch_k: Number of Documents to fetch to pass to MMR algorithm. lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. filter: Doc fields filter conditions that meet the SQL where clause specification. partition: a partition name in collection. [optional]. Returns: List of Documents selected by maximal marginal relevance. """ # query by vector ret = self._collection.query( embedding, topk=fetch_k, filter=filter, partition=partition, include_vector=True, ) if not ret: raise ValueError( f"Fail to query docs by vector, error {self._collection.message}" ) candidate_embeddings = [doc.vector for doc in ret] mmr_selected = maximal_marginal_relevance( np.array(embedding), candidate_embeddings, lambda_mult, k ) metadatas = [ret.output[i].fields for i in mmr_selected] return [ Document(page_content=metadata.pop(self._text_field), metadata=metadata) for metadata in metadatas ] @classmethod def from_texts( cls, texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, dashvector_api_key: Optional[str] = None, dashvector_endpoint: Optional[str] = None, collection_name: str = "langchain", text_field: str = "text", batch_size: int = 25, ids: Optional[List[str]] = None, **kwargs: Any, ) -> DashVector: """Return DashVector VectorStore initialized from texts and embeddings. This is the quick way to get started with dashvector vector store. Example: .. code-block:: python from langchain_community.vectorstores import DashVector from langchain_community.embeddings import OpenAIEmbeddings import dashvector embeddings = OpenAIEmbeddings() dashvector = DashVector.from_documents( docs, embeddings, dashvector_api_key="{DASHVECTOR_API_KEY}" ) """ try: import dashvector except ImportError: raise ImportError( "Could not import dashvector python package. " "Please install it with `pip install dashvector`." 
) dashvector_api_key = dashvector_api_key or get_from_env( "dashvector_api_key", "DASHVECTOR_API_KEY" ) dashvector_endpoint = dashvector_endpoint or get_from_env( "dashvector_endpoint", "DASHVECTOR_ENDPOINT", default="dashvector.cn-hangzhou.aliyuncs.com", ) dashvector_client = dashvector.Client( api_key=dashvector_api_key, endpoint=dashvector_endpoint ) dashvector_client.delete(collection_name) collection = dashvector_client.get(collection_name) if not collection: dim = len(embedding.embed_query(texts[0])) # create collection if not existed resp = dashvector_client.create(collection_name, dimension=dim) if resp: collection = dashvector_client.get(collection_name) else: raise ValueError( "Fail to create collection. " f"Error: {resp.message}." ) dashvector_vector_db = cls(collection, embedding, text_field) dashvector_vector_db.add_texts(texts, metadatas, ids, batch_size) return dashvector_vector_db
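

# A quick-start sketch (illustrative only, not part of the class above). It
# assumes DASHVECTOR_API_KEY (and optionally DASHVECTOR_ENDPOINT) are set in
# the environment. Note that ``from_texts`` deletes any existing collection
# with the same name before recreating it, so use a throwaway name here;
# FakeEmbeddings is a test helper that returns random vectors.
if __name__ == "__main__":
    from langchain_community.embeddings import FakeEmbeddings

    store = DashVector.from_texts(
        texts=["hello", "world"],
        embedding=FakeEmbeddings(size=1024),
        collection_name="langchain-example",
    )
    print(store.similarity_search("hello", k=1))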
0
lc_public_repos/langchain/libs/community/langchain_community
lc_public_repos/langchain/libs/community/langchain_community/vectorstores/lantern.py
from __future__ import annotations import contextlib import enum import logging import uuid from typing import ( Any, Callable, Dict, Generator, Iterable, List, Optional, Tuple, Type, Union, ) import numpy as np import sqlalchemy from sqlalchemy import delete, func from sqlalchemy.dialects.postgresql import JSON, UUID from sqlalchemy.exc import ProgrammingError from sqlalchemy.orm import Session from sqlalchemy.sql import quoted_name from langchain_community.vectorstores.utils import maximal_marginal_relevance try: from sqlalchemy.orm import declarative_base except ImportError: from sqlalchemy.ext.declarative import declarative_base from langchain_core.documents import Document from langchain_core.embeddings import Embeddings from langchain_core.utils import get_from_dict_or_env from langchain_core.vectorstores import VectorStore ADA_TOKEN_COUNT = 1536 _LANGCHAIN_DEFAULT_COLLECTION_NAME = "langchain" def _results_to_docs(docs_and_scores: Any) -> List[Document]: """Return docs from docs and scores.""" return [doc for doc, _ in docs_and_scores] class BaseEmbeddingStore: """Base class for the Lantern embedding store.""" def get_embedding_store( distance_strategy: DistanceStrategy, collection_name: str ) -> Any: """Get the embedding store class.""" embedding_type = None if distance_strategy == DistanceStrategy.HAMMING: embedding_type = sqlalchemy.INTEGER # type: ignore else: embedding_type = sqlalchemy.REAL # type: ignore DynamicBase = declarative_base(class_registry=dict()) # type: Any class EmbeddingStore(DynamicBase, BaseEmbeddingStore): __tablename__ = collection_name uuid = sqlalchemy.Column( UUID(as_uuid=True), primary_key=True, default=uuid.uuid4 ) __table_args__ = {"extend_existing": True} document = sqlalchemy.Column(sqlalchemy.String, nullable=True) cmetadata = sqlalchemy.Column(JSON, nullable=True) # custom_id : any user defined id custom_id = sqlalchemy.Column(sqlalchemy.String, nullable=True) embedding = sqlalchemy.Column(sqlalchemy.ARRAY(embedding_type)) # type: ignore return EmbeddingStore class QueryResult: """Result from a query.""" EmbeddingStore: BaseEmbeddingStore distance: float class DistanceStrategy(str, enum.Enum): """Enumerator of the Distance strategies.""" EUCLIDEAN = "l2sq" COSINE = "cosine" HAMMING = "hamming" DEFAULT_DISTANCE_STRATEGY = DistanceStrategy.COSINE class Lantern(VectorStore): """`Postgres` with the `lantern` extension as a vector store. lantern uses sequential scan by default. but you can create a HNSW index using the create_hnsw_index method. - `connection_string` is a postgres connection string. - `embedding_function` any embedding function implementing `langchain.embeddings.base.Embeddings` interface. - `collection_name` is the name of the collection to use. (default: langchain) - NOTE: This is the name of the table in which embedding data will be stored The table will be created when initializing the store (if not exists) So, make sure the user has the right permissions to create tables. - `distance_strategy` is the distance strategy to use. (default: EUCLIDEAN) - `EUCLIDEAN` is the euclidean distance. - `COSINE` is the cosine distance. - `HAMMING` is the hamming distance. - `pre_delete_collection` if True, will delete the collection if it exists. (default: False) - Useful for testing. 
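
    Example:
        .. code-block:: python

            # A minimal sketch (hypothetical connection string); any
            # langchain_core Embeddings implementation can be used in place of
            # the FakeEmbeddings test helper.
            from langchain_community.embeddings import FakeEmbeddings
            from langchain_community.vectorstores import Lantern

            CONNECTION_STRING = "postgresql+psycopg2://user:pass@localhost:5432/db"

            store = Lantern.from_texts(
                texts=["hello", "world"],
                embedding=FakeEmbeddings(size=1536),
                collection_name="langchain",
                connection_string=CONNECTION_STRING,
            )
            docs = store.similarity_search("hello", k=1)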
""" def __init__( self, connection_string: str, embedding_function: Embeddings, distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY, collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME, collection_metadata: Optional[dict] = None, pre_delete_collection: bool = False, logger: Optional[logging.Logger] = None, relevance_score_fn: Optional[Callable[[float], float]] = None, ) -> None: self.connection_string = connection_string self.embedding_function = embedding_function self.collection_name = collection_name self.collection_metadata = collection_metadata self._distance_strategy = distance_strategy self.pre_delete_collection = pre_delete_collection self.logger = logger or logging.getLogger(__name__) self.override_relevance_score_fn = relevance_score_fn self.EmbeddingStore = get_embedding_store( self.distance_strategy, collection_name ) self.__post_init__() def __post_init__( self, ) -> None: self._conn = self.connect() self.create_hnsw_extension() self.create_collection() @property def distance_strategy(self) -> DistanceStrategy: if isinstance(self._distance_strategy, DistanceStrategy): return self._distance_strategy if self._distance_strategy == DistanceStrategy.EUCLIDEAN.value: return DistanceStrategy.EUCLIDEAN elif self._distance_strategy == DistanceStrategy.COSINE.value: return DistanceStrategy.COSINE elif self._distance_strategy == DistanceStrategy.HAMMING.value: return DistanceStrategy.HAMMING else: raise ValueError( f"Got unexpected value for distance: {self._distance_strategy}. " f"Should be one of {', '.join([ds.value for ds in DistanceStrategy])}." ) @property def embeddings(self) -> Embeddings: return self.embedding_function @classmethod def connection_string_from_db_params( cls, driver: str, host: str, port: int, database: str, user: str, password: str, ) -> str: """Return connection string from database parameters.""" return f"postgresql+{driver}://{user}:{password}@{host}:{port}/{database}" def connect(self) -> sqlalchemy.engine.Connection: engine = sqlalchemy.create_engine(self.connection_string) conn = engine.connect() return conn @property def distance_function(self) -> Any: if self.distance_strategy == DistanceStrategy.EUCLIDEAN: return "l2sq_dist" elif self.distance_strategy == DistanceStrategy.COSINE: return "cos_dist" elif self.distance_strategy == DistanceStrategy.HAMMING: return "hamming_dist" def create_hnsw_extension(self) -> None: try: with Session(self._conn) as session: statement = sqlalchemy.text("CREATE EXTENSION IF NOT EXISTS lantern") session.execute(statement) session.commit() except Exception as e: self.logger.exception(e) def create_tables_if_not_exists(self) -> None: try: self.create_collection() except ProgrammingError: pass def drop_table(self) -> None: try: self.EmbeddingStore.__table__.drop(self._conn.engine) except ProgrammingError: pass def drop_tables(self) -> None: self.drop_table() def _hamming_relevance_score_fn(self, distance: float) -> float: return distance def _select_relevance_score_fn(self) -> Callable[[float], float]: """ The 'correct' relevance function may differ depending on a few things, including: - the distance / similarity metric used by the VectorStore - the scale of your embeddings (OpenAI's are unit normed. Many others are not!) - embedding dimensionality - etc. 
""" if self.override_relevance_score_fn is not None: return self.override_relevance_score_fn # Default strategy is to rely on distance strategy provided # in vectorstore constructor if self.distance_strategy == DistanceStrategy.COSINE: return self._cosine_relevance_score_fn elif self.distance_strategy == DistanceStrategy.EUCLIDEAN: return self._euclidean_relevance_score_fn elif self.distance_strategy == DistanceStrategy.HAMMING: return self._hamming_relevance_score_fn else: raise ValueError( "No supported normalization function" f" for distance_strategy of {self._distance_strategy}." "Consider providing relevance_score_fn to Lantern constructor." ) def _get_op_class(self) -> str: if self.distance_strategy == DistanceStrategy.COSINE: return "dist_cos_ops" elif self.distance_strategy == DistanceStrategy.EUCLIDEAN: return "dist_l2sq_ops" elif self.distance_strategy == DistanceStrategy.HAMMING: return "dist_hamming_ops" else: raise ValueError( "No supported operator class" f" for distance_strategy of {self._distance_strategy}." ) def _get_operator(self) -> str: if self.distance_strategy == DistanceStrategy.COSINE: return "<=>" elif self.distance_strategy == DistanceStrategy.EUCLIDEAN: return "<->" elif self.distance_strategy == DistanceStrategy.HAMMING: return "<+>" else: raise ValueError( "No supported operator" f" for distance_strategy of {self._distance_strategy}." ) def _typed_arg_for_distance( self, embedding: List[Union[float, int]] ) -> List[Union[float, int]]: if self.distance_strategy == DistanceStrategy.HAMMING: return list(map(lambda x: int(x), embedding)) return embedding @property def _index_name(self) -> str: return f"langchain_{self.collection_name}_idx" def create_hnsw_index( self, dims: int = ADA_TOKEN_COUNT, m: int = 16, ef_construction: int = 64, ef_search: int = 64, **_kwargs: Any, ) -> None: """Create HNSW index on collection. Optional Keyword Args for HNSW Index: engine: "nmslib", "faiss", "lucene"; default: "nmslib" ef: Size of the dynamic list used during k-NN searches. Higher values lead to more accurate but slower searches; default: 64 ef_construction: Size of the dynamic list used during k-NN graph creation. Higher values lead to more accurate graph but slower indexing speed; default: 64 m: Number of bidirectional links created for each new element. Large impact on memory consumption. Between 2 and 100; default: 16 dims: Dimensions of the vectors in collection. 
default: 1536 """ create_index_query = sqlalchemy.text( "CREATE INDEX IF NOT EXISTS {} " "ON {} USING hnsw (embedding {}) " "WITH (" "dim = :dim, " "m = :m, " "ef_construction = :ef_construction, " "ef = :ef" ");".format( quoted_name(self._index_name, True), quoted_name(self.collection_name, True), self._get_op_class(), ) ) with Session(self._conn) as session: # Create the HNSW index session.execute( create_index_query, { "dim": dims, "m": m, "ef_construction": ef_construction, "ef": ef_search, }, ) session.commit() self.logger.info("HNSW extension and index created successfully.") def drop_index(self) -> None: with Session(self._conn) as session: # Drop the HNSW index session.execute( sqlalchemy.text( "DROP INDEX IF EXISTS {}".format( quoted_name(self._index_name, True) ) ) ) session.commit() def create_collection(self) -> None: if self.pre_delete_collection: self.delete_collection() self.drop_table() with self._conn.begin(): try: self.EmbeddingStore.__table__.create(self._conn.engine) except ProgrammingError as e: # Duplicate table if e.code == "f405": pass else: raise e def delete_collection(self) -> None: self.logger.debug("Trying to delete collection") self.drop_table() @contextlib.contextmanager def _make_session(self) -> Generator[Session, None, None]: """Create a context manager for the session, bind to _conn string.""" yield Session(self._conn) def delete( self, ids: Optional[List[str]] = None, **kwargs: Any, ) -> None: """Delete vectors by ids or uuids. Args: ids: List of ids to delete. """ with Session(self._conn) as session: if ids is not None: self.logger.debug( "Trying to delete vectors by ids (represented by the model " "using the custom ids field)" ) stmt = delete(self.EmbeddingStore).where( self.EmbeddingStore.custom_id.in_(ids) ) session.execute(stmt) session.commit() @classmethod def _initialize_from_embeddings( cls, texts: List[str], embeddings: List[List[float]], embedding: Embeddings, metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME, distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY, pre_delete_collection: bool = False, **kwargs: Any, ) -> Lantern: """ Order of elements for lists `ids`, `embeddings`, `texts`, `metadatas` should match, so each row will be associated with correct values. Postgres connection string is required "Either pass it as `connection_string` parameter or set the LANTERN_CONNECTION_STRING environment variable. - `texts` texts to insert into collection. - `embeddings` an Embeddings to insert into collection - `embedding` is :class:`Embeddings` that will be used for embedding the text sent. If none is sent, then the multilingual Tensorflow Universal Sentence Encoder will be used. - `metadatas` row metadata to insert into collection. - `ids` row ids to insert into collection. - `collection_name` is the name of the collection to use. (default: langchain) - NOTE: This is the name of the table in which embedding data will be stored The table will be created when initializing the store (if not exists) So, make sure the user has the right permissions to create tables. - `distance_strategy` is the distance strategy to use. (default: EUCLIDEAN) - `EUCLIDEAN` is the euclidean distance. - `COSINE` is the cosine distance. - `HAMMING` is the hamming distance. - `pre_delete_collection` if True, will delete the collection if it exists. (default: False) - Useful for testing. 
""" if ids is None: ids = [str(uuid.uuid4()) for _ in texts] if not metadatas: metadatas = [{} for _ in texts] connection_string = cls.__get_connection_string(kwargs) store = cls( connection_string=connection_string, collection_name=collection_name, embedding_function=embedding, pre_delete_collection=pre_delete_collection, distance_strategy=distance_strategy, ) store.add_embeddings( texts=texts, embeddings=embeddings, metadatas=metadatas, ids=ids, **kwargs ) store.create_hnsw_index(**kwargs) return store def add_embeddings( self, texts: List[str], embeddings: List[List[float]], metadatas: List[dict], ids: List[str], **kwargs: Any, ) -> None: with Session(self._conn) as session: for text, metadata, embedding, id in zip(texts, metadatas, embeddings, ids): embedding_store = self.EmbeddingStore( embedding=embedding, document=text, cmetadata=metadata, custom_id=id, ) session.add(embedding_store) session.commit() def add_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, **kwargs: Any, ) -> List[str]: if ids is None: ids = [str(uuid.uuid4()) for _ in texts] embeddings = self.embedding_function.embed_documents(list(texts)) if not metadatas: metadatas = [{} for _ in texts] with Session(self._conn) as session: for text, metadata, embedding, id in zip(texts, metadatas, embeddings, ids): embedding_store = self.EmbeddingStore( embedding=embedding, document=text, cmetadata=metadata, custom_id=id, ) session.add(embedding_store) session.commit() return ids def _results_to_docs_and_scores(self, results: Any) -> List[Tuple[Document, float]]: """Return docs and scores from results.""" docs = [ ( Document( page_content=result.EmbeddingStore.document, metadata=result.EmbeddingStore.cmetadata, ), result.distance if self.embedding_function is not None else None, ) for result in results ] return docs def similarity_search( self, query: str, k: int = 4, filter: Optional[dict] = None, **kwargs: Any, ) -> List[Document]: embedding = self.embedding_function.embed_query(text=query) return self.similarity_search_by_vector( embedding=embedding, k=k, filter=filter, ) def similarity_search_with_score( self, query: str, k: int = 4, filter: Optional[dict] = None, ) -> List[Tuple[Document, float]]: embedding = self.embedding_function.embed_query(query) docs = self.similarity_search_with_score_by_vector( embedding=embedding, k=k, filter=filter ) return docs def similarity_search_with_score_by_vector( self, embedding: List[float], k: int = 4, filter: Optional[dict] = None, ) -> List[Tuple[Document, float]]: results = self.__query_collection(embedding=embedding, k=k, filter=filter) return self._results_to_docs_and_scores(results) def __query_collection( self, embedding: List[float], k: int = 4, filter: Optional[dict] = None, ) -> List[Any]: with Session(self._conn) as session: set_enable_seqscan_stmt = sqlalchemy.text("SET enable_seqscan = off") set_init_k = sqlalchemy.text("SET hnsw.init_k = :k") session.execute(set_enable_seqscan_stmt) session.execute(set_init_k, {"k": k}) filter_by = None if filter is not None: filter_clauses = [] for key, value in filter.items(): IN = "in" if isinstance(value, dict) and IN in map(str.lower, value): value_case_insensitive = { k.lower(): v for k, v in value.items() } filter_by_metadata = self.EmbeddingStore.cmetadata[ key ].astext.in_(value_case_insensitive[IN]) filter_clauses.append(filter_by_metadata) else: filter_by_metadata = self.EmbeddingStore.cmetadata[ key ].astext == str(value) filter_clauses.append(filter_by_metadata) 
filter_by = sqlalchemy.and_(*filter_clauses) embedding = self._typed_arg_for_distance(embedding) query = session.query( self.EmbeddingStore, getattr(func, self.distance_function)( self.EmbeddingStore.embedding, embedding ).label("distance"), ) # Specify the columns you need here, e.g., EmbeddingStore.embedding if filter_by is not None: query = query.filter(filter_by) results: List[QueryResult] = ( query.order_by( self.EmbeddingStore.embedding.op(self._get_operator())(embedding) ) # Using PostgreSQL specific operator with the correct column name .limit(k) .all() ) return results def similarity_search_by_vector( self, embedding: List[float], k: int = 4, filter: Optional[dict] = None, **kwargs: Any, ) -> List[Document]: docs_and_scores = self.similarity_search_with_score_by_vector( embedding=embedding, k=k, filter=filter ) return _results_to_docs(docs_and_scores) @classmethod def from_texts( cls: Type[Lantern], texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME, distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY, ids: Optional[List[str]] = None, pre_delete_collection: bool = False, **kwargs: Any, ) -> Lantern: """ Initialize Lantern vectorstore from list of texts. The embeddings will be generated using `embedding` class provided. Order of elements for lists `ids`, `texts`, `metadatas` should match, so each row will be associated with correct values. Postgres connection string is required "Either pass it as `connection_string` parameter or set the LANTERN_CONNECTION_STRING environment variable. - `connection_string` is fully populated connection string for postgres database - `texts` texts to insert into collection. - `embedding` is :class:`Embeddings` that will be used for embedding the text sent. If none is sent, then the multilingual Tensorflow Universal Sentence Encoder will be used. - `metadatas` row metadata to insert into collection. - `collection_name` is the name of the collection to use. (default: langchain) - NOTE: This is the name of the table in which embedding data will be stored The table will be created when initializing the store (if not exists) So, make sure the user has the right permissions to create tables. - `distance_strategy` is the distance strategy to use. (default: EUCLIDEAN) - `EUCLIDEAN` is the euclidean distance. - `COSINE` is the cosine distance. - `HAMMING` is the hamming distance. - `ids` row ids to insert into collection. - `pre_delete_collection` if True, will delete the collection if it exists. (default: False) - Useful for testing. """ embeddings = embedding.embed_documents(list(texts)) return cls._initialize_from_embeddings( texts, embeddings, embedding, metadatas=metadatas, ids=ids, collection_name=collection_name, pre_delete_collection=pre_delete_collection, distance_strategy=distance_strategy, **kwargs, ) @classmethod def from_embeddings( cls, text_embeddings: List[Tuple[str, List[float]]], embedding: Embeddings, metadatas: Optional[List[dict]] = None, collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME, ids: Optional[List[str]] = None, pre_delete_collection: bool = False, distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY, **kwargs: Any, ) -> Lantern: """Construct Lantern wrapper from raw documents and pre- generated embeddings. Postgres connection string is required "Either pass it as `connection_string` parameter or set the LANTERN_CONNECTION_STRING environment variable. 
Order of elements for lists `ids`, `text_embeddings`, `metadatas` should match, so each row will be associated with correct values. - `connection_string` is fully populated connection string for postgres database - `text_embeddings` is array with tuples (text, embedding) to insert into collection. - `embedding` is :class:`Embeddings` that will be used for embedding the text sent. If none is sent, then the multilingual Tensorflow Universal Sentence Encoder will be used. - `metadatas` row metadata to insert into collection. - `collection_name` is the name of the collection to use. (default: langchain) - NOTE: This is the name of the table in which embedding data will be stored The table will be created when initializing the store (if not exists) So, make sure the user has the right permissions to create tables. - `ids` row ids to insert into collection. - `pre_delete_collection` if True, will delete the collection if it exists. (default: False) - Useful for testing. - `distance_strategy` is the distance strategy to use. (default: EUCLIDEAN) - `EUCLIDEAN` is the euclidean distance. - `COSINE` is the cosine distance. - `HAMMING` is the hamming distance. """ texts = [t[0] for t in text_embeddings] embeddings = [t[1] for t in text_embeddings] return cls._initialize_from_embeddings( texts, embeddings, embedding, metadatas=metadatas, ids=ids, collection_name=collection_name, pre_delete_collection=pre_delete_collection, distance_strategy=distance_strategy, **kwargs, ) @classmethod def from_existing_index( cls: Type[Lantern], embedding: Embeddings, collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME, pre_delete_collection: bool = False, distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY, **kwargs: Any, ) -> Lantern: """ Get instance of an existing Lantern store.This method will return the instance of the store without inserting any new embeddings Postgres connection string is required "Either pass it as `connection_string` parameter or set the LANTERN_CONNECTION_STRING environment variable. - `connection_string` is a postgres connection string. - `embedding` is :class:`Embeddings` that will be used for embedding the text sent. If none is sent, then the multilingual Tensorflow Universal Sentence Encoder will be used. - `collection_name` is the name of the collection to use. (default: langchain) - NOTE: This is the name of the table in which embedding data will be stored The table will be created when initializing the store (if not exists) So, make sure the user has the right permissions to create tables. - `ids` row ids to insert into collection. - `pre_delete_collection` if True, will delete the collection if it exists. (default: False) - Useful for testing. - `distance_strategy` is the distance strategy to use. (default: EUCLIDEAN) - `EUCLIDEAN` is the euclidean distance. - `COSINE` is the cosine distance. - `HAMMING` is the hamming distance. 
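
        Example:
            .. code-block:: python

                # A minimal sketch (hypothetical connection string); it assumes
                # ``embeddings`` is an Embeddings instance and that the
                # collection was already populated, e.g. via ``from_texts``.
                store = Lantern.from_existing_index(
                    embedding=embeddings,
                    collection_name="langchain",
                    connection_string="postgresql+psycopg2://user:pass@localhost:5432/db",
                )
                docs = store.similarity_search("hello", k=1)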
""" connection_string = cls.__get_connection_string(kwargs) store = cls( connection_string=connection_string, collection_name=collection_name, embedding_function=embedding, pre_delete_collection=pre_delete_collection, distance_strategy=distance_strategy, ) return store @classmethod def __get_connection_string(cls, kwargs: Dict[str, Any]) -> str: connection_string: str = get_from_dict_or_env( data=kwargs, key="connection_string", env_key="LANTERN_CONNECTION_STRING", ) if not connection_string: raise ValueError( "Postgres connection string is required" "Either pass it as `connection_string` parameter" "or set the LANTERN_CONNECTION_STRING variable." ) return connection_string @classmethod def from_documents( cls: Type[Lantern], documents: List[Document], embedding: Embeddings, collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME, distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY, ids: Optional[List[str]] = None, pre_delete_collection: bool = False, **kwargs: Any, ) -> Lantern: """ Initialize a vector store with a set of documents. Postgres connection string is required "Either pass it as `connection_string` parameter or set the LANTERN_CONNECTION_STRING environment variable. - `connection_string` is a postgres connection string. - `documents` is list of :class:`Document` to initialize the vector store with - `embedding` is :class:`Embeddings` that will be used for embedding the text sent. If none is sent, then the multilingual Tensorflow Universal Sentence Encoder will be used. - `collection_name` is the name of the collection to use. (default: langchain) - NOTE: This is the name of the table in which embedding data will be stored The table will be created when initializing the store (if not exists) So, make sure the user has the right permissions to create tables. - `distance_strategy` is the distance strategy to use. (default: EUCLIDEAN) - `EUCLIDEAN` is the euclidean distance. - `COSINE` is the cosine distance. - `HAMMING` is the hamming distance. - `ids` row ids to insert into collection. - `pre_delete_collection` if True, will delete the collection if it exists. (default: False) - Useful for testing. """ texts = [d.page_content for d in documents] metadatas = [d.metadata for d in documents] connection_string = cls.__get_connection_string(kwargs) kwargs["connection_string"] = connection_string return cls.from_texts( texts=texts, pre_delete_collection=pre_delete_collection, embedding=embedding, metadatas=metadatas, ids=ids, collection_name=collection_name, distance_strategy=distance_strategy, **kwargs, ) def max_marginal_relevance_search_with_score_by_vector( self, embedding: List[float], k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, filter: Optional[Dict[str, str]] = None, **kwargs: Any, ) -> List[Tuple[Document, float]]: """Return docs selected using the maximal marginal relevance with score to embedding vector. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: embedding: Embedding to look up documents similar to. k (int): Number of Documents to return. Defaults to 4. fetch_k (int): Number of Documents to fetch to pass to MMR algorithm. Defaults to 20. lambda_mult (float): Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. 
        Returns:
            List[Tuple[Document, float]]: List of Documents selected by maximal marginal
                relevance to the query and score for each.
        """
        results = self.__query_collection(embedding=embedding, k=fetch_k, filter=filter)

        embedding_list = [result.EmbeddingStore.embedding for result in results]

        mmr_selected = maximal_marginal_relevance(
            np.array(embedding, dtype=np.float32),
            embedding_list,
            k=k,
            lambda_mult=lambda_mult,
        )

        candidates = self._results_to_docs_and_scores(results)

        return [r for i, r in enumerate(candidates) if i in mmr_selected]

    def max_marginal_relevance_search(
        self,
        query: str,
        k: int = 4,
        fetch_k: int = 20,
        lambda_mult: float = 0.5,
        filter: Optional[Dict[str, str]] = None,
        **kwargs: Any,
    ) -> List[Document]:
        """Return docs selected using the maximal marginal relevance.

        Maximal marginal relevance optimizes for similarity to query AND diversity
            among selected documents.

        Args:
            query (str): Text to look up documents similar to.
            k (int): Number of Documents to return. Defaults to 4.
            fetch_k (int): Number of Documents to fetch to pass to MMR algorithm.
                Defaults to 20.
            lambda_mult (float): Number between 0 and 1 that determines the degree
                of diversity among the results with 0 corresponding
                to maximum diversity and 1 to minimum diversity.
                Defaults to 0.5.
            filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.

        Returns:
            List[Document]: List of Documents selected by maximal marginal relevance.
        """
        embedding = self.embedding_function.embed_query(query)
        return self.max_marginal_relevance_search_by_vector(
            embedding,
            k=k,
            fetch_k=fetch_k,
            lambda_mult=lambda_mult,
            filter=filter,
            **kwargs,
        )

    def max_marginal_relevance_search_with_score(
        self,
        query: str,
        k: int = 4,
        fetch_k: int = 20,
        lambda_mult: float = 0.5,
        filter: Optional[dict] = None,
        **kwargs: Any,
    ) -> List[Tuple[Document, float]]:
        """Return docs selected using the maximal marginal relevance with score.

        Maximal marginal relevance optimizes for similarity to query AND diversity
            among selected documents.

        Args:
            query (str): Text to look up documents similar to.
            k (int): Number of Documents to return. Defaults to 4.
            fetch_k (int): Number of Documents to fetch to pass to MMR algorithm.
                Defaults to 20.
            lambda_mult (float): Number between 0 and 1 that determines the degree
                of diversity among the results with 0 corresponding
                to maximum diversity and 1 to minimum diversity.
                Defaults to 0.5.
            filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.

        Returns:
            List[Tuple[Document, float]]: List of Documents selected by maximal marginal
                relevance to the query and score for each.
        """
        embedding = self.embedding_function.embed_query(query)
        docs = self.max_marginal_relevance_search_with_score_by_vector(
            embedding=embedding,
            k=k,
            fetch_k=fetch_k,
            lambda_mult=lambda_mult,
            filter=filter,
            **kwargs,
        )
        return docs

    def max_marginal_relevance_search_by_vector(
        self,
        embedding: List[float],
        k: int = 4,
        fetch_k: int = 20,
        lambda_mult: float = 0.5,
        filter: Optional[Dict[str, str]] = None,
        **kwargs: Any,
    ) -> List[Document]:
        """Return docs selected using the maximal marginal relevance
            for an embedding vector.

        Maximal marginal relevance optimizes for similarity to query AND diversity
            among selected documents.

        Args:
            embedding (List[float]): Embedding to look up documents similar to.
            k (int): Number of Documents to return. Defaults to 4.
            fetch_k (int): Number of Documents to fetch to pass to MMR algorithm.
                Defaults to 20.
lambda_mult (float): Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. Returns: List[Document]: List of Documents selected by maximal marginal relevance. """ docs_and_scores = self.max_marginal_relevance_search_with_score_by_vector( embedding, k=k, fetch_k=fetch_k, lambda_mult=lambda_mult, filter=filter, **kwargs, ) return _results_to_docs(docs_and_scores)
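

# Illustrative usage sketch: a hedged, minimal example of the API above. It
# assumes a reachable Postgres database with the Lantern extension installed;
# the connection string, collection name, and FakeEmbeddings stand-in are
# placeholders for demonstration only, not part of the original module.
if __name__ == "__main__":
    from langchain_community.embeddings import FakeEmbeddings

    store = Lantern.from_texts(
        texts=[
            "lantern stores vectors in postgres",
            "mmr trades relevance for diversity",
        ],
        embedding=FakeEmbeddings(size=16),  # placeholder; substitute a real model
        connection_string="postgresql://user:pass@localhost:5432/postgres",  # placeholder
        collection_name="lantern_demo",
        pre_delete_collection=True,  # start from a clean table; useful for testing
    )
    # MMR re-ranks the fetch_k nearest rows to balance similarity and diversity.
    for doc in store.max_marginal_relevance_search(
        "postgres vectors", k=2, fetch_k=10
    ):
        print(doc.page_content)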
0
lc_public_repos/langchain/libs/community/langchain_community
lc_public_repos/langchain/libs/community/langchain_community/vectorstores/weaviate.py
from __future__ import annotations import datetime import os from typing import ( TYPE_CHECKING, Any, Callable, Dict, Iterable, List, Optional, Tuple, ) from uuid import uuid4 import numpy as np from langchain_core.documents import Document from langchain_core.embeddings import Embeddings from langchain_core.vectorstores import VectorStore from langchain_community.vectorstores.utils import maximal_marginal_relevance if TYPE_CHECKING: import weaviate def _default_schema(index_name: str, text_key: str) -> Dict: return { "class": index_name, "properties": [ { "name": text_key, "dataType": ["text"], } ], } def _create_weaviate_client( url: Optional[str] = None, api_key: Optional[str] = None, **kwargs: Any, ) -> weaviate.Client: try: import weaviate except ImportError: raise ImportError( "Could not import weaviate python package. " "Please install it with `pip install weaviate-client`" ) url = url or os.environ.get("WEAVIATE_URL") api_key = api_key or os.environ.get("WEAVIATE_API_KEY") auth = weaviate.auth.AuthApiKey(api_key=api_key) if api_key else None return weaviate.Client(url=url, auth_client_secret=auth, **kwargs) def _default_score_normalizer(val: float) -> float: return 1 - 1 / (1 + np.exp(val)) def _json_serializable(value: Any) -> Any: if isinstance(value, datetime.datetime): return value.isoformat() return value class Weaviate(VectorStore): """`Weaviate` vector store. To use, you should have the ``weaviate-client`` python package installed. Example: .. code-block:: python import weaviate from langchain_community.vectorstores import Weaviate client = weaviate.Client(url=os.environ["WEAVIATE_URL"], ...) weaviate = Weaviate(client, index_name, text_key) """ def __init__( self, client: Any, index_name: str, text_key: str, embedding: Optional[Embeddings] = None, attributes: Optional[List[str]] = None, relevance_score_fn: Optional[ Callable[[float], float] ] = _default_score_normalizer, by_text: bool = True, ): """Initialize with Weaviate client.""" try: import weaviate except ImportError: raise ImportError( "Could not import weaviate python package. " "Please install it with `pip install weaviate-client`." 
) if not isinstance(client, weaviate.Client): raise ValueError( f"client should be an instance of weaviate.Client, got {type(client)}" ) self._client = client self._index_name = index_name self._embedding = embedding self._text_key = text_key self._query_attrs = [self._text_key] self.relevance_score_fn = relevance_score_fn self._by_text = by_text if attributes is not None: self._query_attrs.extend(attributes) @property def embeddings(self) -> Optional[Embeddings]: return self._embedding def _select_relevance_score_fn(self) -> Callable[[float], float]: return ( self.relevance_score_fn if self.relevance_score_fn else _default_score_normalizer ) def add_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, **kwargs: Any, ) -> List[str]: """Upload texts with metadata (properties) to Weaviate.""" from weaviate.util import get_valid_uuid ids = [] embeddings: Optional[List[List[float]]] = None if self._embedding: if not isinstance(texts, list): texts = list(texts) embeddings = self._embedding.embed_documents(texts) with self._client.batch as batch: for i, text in enumerate(texts): data_properties = {self._text_key: text} if metadatas is not None: for key, val in metadatas[i].items(): data_properties[key] = _json_serializable(val) # Allow for ids (consistent w/ other methods) # # Or uuids (backwards compatible w/ existing arg) # If the UUID of one of the objects already exists # then the existing object will be replaced by the new object. _id = get_valid_uuid(uuid4()) if "uuids" in kwargs: _id = kwargs["uuids"][i] elif "ids" in kwargs: _id = kwargs["ids"][i] batch.add_data_object( data_object=data_properties, class_name=self._index_name, uuid=_id, vector=embeddings[i] if embeddings else None, tenant=kwargs.get("tenant"), ) ids.append(_id) return ids def similarity_search( self, query: str, k: int = 4, **kwargs: Any ) -> List[Document]: """Return docs most similar to query. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. Returns: List of Documents most similar to the query. """ if self._by_text: return self.similarity_search_by_text(query, k, **kwargs) else: if self._embedding is None: raise ValueError( "_embedding cannot be None for similarity_search when " "_by_text=False" ) embedding = self._embedding.embed_query(query) return self.similarity_search_by_vector(embedding, k, **kwargs) def similarity_search_by_text( self, query: str, k: int = 4, **kwargs: Any ) -> List[Document]: """Return docs most similar to query. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. Returns: List of Documents most similar to the query. 
""" content: Dict[str, Any] = {"concepts": [query]} if kwargs.get("search_distance"): content["certainty"] = kwargs.get("search_distance") query_obj = self._client.query.get(self._index_name, self._query_attrs) if kwargs.get("where_filter"): query_obj = query_obj.with_where(kwargs.get("where_filter")) if kwargs.get("tenant"): query_obj = query_obj.with_tenant(kwargs.get("tenant")) if kwargs.get("additional"): query_obj = query_obj.with_additional(kwargs.get("additional")) result = query_obj.with_near_text(content).with_limit(k).do() if "errors" in result: raise ValueError(f"Error during query: {result['errors']}") docs = [] for res in result["data"]["Get"][self._index_name]: text = res.pop(self._text_key) docs.append(Document(page_content=text, metadata=res)) return docs def similarity_search_by_vector( self, embedding: List[float], k: int = 4, **kwargs: Any ) -> List[Document]: """Look up similar documents by embedding vector in Weaviate.""" vector = {"vector": embedding} query_obj = self._client.query.get(self._index_name, self._query_attrs) if kwargs.get("where_filter"): query_obj = query_obj.with_where(kwargs.get("where_filter")) if kwargs.get("tenant"): query_obj = query_obj.with_tenant(kwargs.get("tenant")) if kwargs.get("additional"): query_obj = query_obj.with_additional(kwargs.get("additional")) result = query_obj.with_near_vector(vector).with_limit(k).do() if "errors" in result: raise ValueError(f"Error during query: {result['errors']}") docs = [] for res in result["data"]["Get"][self._index_name]: text = res.pop(self._text_key) docs.append(Document(page_content=text, metadata=res)) return docs def max_marginal_relevance_search( self, query: str, k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, **kwargs: Any, ) -> List[Document]: """Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. fetch_k: Number of Documents to fetch to pass to MMR algorithm. lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. Returns: List of Documents selected by maximal marginal relevance. """ if self._embedding is not None: embedding = self._embedding.embed_query(query) else: raise ValueError( "max_marginal_relevance_search requires a suitable Embeddings object" ) return self.max_marginal_relevance_search_by_vector( embedding, k=k, fetch_k=fetch_k, lambda_mult=lambda_mult, **kwargs ) def max_marginal_relevance_search_by_vector( self, embedding: List[float], k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, **kwargs: Any, ) -> List[Document]: """Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. fetch_k: Number of Documents to fetch to pass to MMR algorithm. lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. Returns: List of Documents selected by maximal marginal relevance. 
""" vector = {"vector": embedding} query_obj = self._client.query.get(self._index_name, self._query_attrs) if kwargs.get("where_filter"): query_obj = query_obj.with_where(kwargs.get("where_filter")) if kwargs.get("tenant"): query_obj = query_obj.with_tenant(kwargs.get("tenant")) results = ( query_obj.with_additional("vector") .with_near_vector(vector) .with_limit(fetch_k) .do() ) payload = results["data"]["Get"][self._index_name] embeddings = [result["_additional"]["vector"] for result in payload] mmr_selected = maximal_marginal_relevance( np.array(embedding), embeddings, k=k, lambda_mult=lambda_mult ) docs = [] for idx in mmr_selected: text = payload[idx].pop(self._text_key) payload[idx].pop("_additional") meta = payload[idx] docs.append(Document(page_content=text, metadata=meta)) return docs def similarity_search_with_score( self, query: str, k: int = 4, **kwargs: Any ) -> List[Tuple[Document, float]]: """ Return list of documents most similar to the query text and cosine distance in float for each. Lower score represents more similarity. """ if self._embedding is None: raise ValueError( "_embedding cannot be None for similarity_search_with_score" ) content: Dict[str, Any] = {"concepts": [query]} if kwargs.get("search_distance"): content["certainty"] = kwargs.get("search_distance") query_obj = self._client.query.get(self._index_name, self._query_attrs) if kwargs.get("where_filter"): query_obj = query_obj.with_where(kwargs.get("where_filter")) if kwargs.get("tenant"): query_obj = query_obj.with_tenant(kwargs.get("tenant")) embedded_query = self._embedding.embed_query(query) if not self._by_text: vector = {"vector": embedded_query} result = ( query_obj.with_near_vector(vector) .with_limit(k) .with_additional("vector") .do() ) else: result = ( query_obj.with_near_text(content) .with_limit(k) .with_additional("vector") .do() ) if "errors" in result: raise ValueError(f"Error during query: {result['errors']}") docs_and_scores = [] for res in result["data"]["Get"][self._index_name]: text = res.pop(self._text_key) score = np.dot(res["_additional"]["vector"], embedded_query) docs_and_scores.append((Document(page_content=text, metadata=res), score)) return docs_and_scores @classmethod def from_texts( cls, texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, *, client: Optional[weaviate.Client] = None, weaviate_url: Optional[str] = None, weaviate_api_key: Optional[str] = None, batch_size: Optional[int] = None, index_name: Optional[str] = None, text_key: str = "text", by_text: bool = False, relevance_score_fn: Optional[ Callable[[float], float] ] = _default_score_normalizer, **kwargs: Any, ) -> Weaviate: """Construct Weaviate wrapper from raw documents. This is a user-friendly interface that: 1. Embeds documents. 2. Creates a new index for the embeddings in the Weaviate instance. 3. Adds the documents to the newly created Weaviate index. This is intended to be a quick way to get started. Args: texts: Texts to add to vector store. embedding: Text embedding model to use. metadatas: Metadata associated with each text. client: weaviate.Client to use. weaviate_url: The Weaviate URL. If using Weaviate Cloud Services get it from the ``Details`` tab. Can be passed in as a named param or by setting the environment variable ``WEAVIATE_URL``. Should not be specified if client is provided. weaviate_api_key: The Weaviate API key. If enabled and using Weaviate Cloud Services, get it from ``Details`` tab. 
Can be passed in as a named param or by setting the environment variable ``WEAVIATE_API_KEY``. Should not be specified if client is provided. batch_size: Size of batch operations. index_name: Index name. text_key: Key to use for uploading/retrieving text to/from vectorstore. by_text: Whether to search by text or by embedding. relevance_score_fn: Function for converting whatever distance function the vector store uses to a relevance score, which is a normalized similarity score (0 means dissimilar, 1 means similar). kwargs: Additional named parameters to pass to ``Weaviate.__init__()``. Example: .. code-block:: python from langchain_community.embeddings import OpenAIEmbeddings from langchain_community.vectorstores import Weaviate embeddings = OpenAIEmbeddings() weaviate = Weaviate.from_texts( texts, embeddings, weaviate_url="http://localhost:8080" ) """ try: from weaviate.util import get_valid_uuid except ImportError as e: raise ImportError( "Could not import weaviate python package. " "Please install it with `pip install weaviate-client`" ) from e client = client or _create_weaviate_client( url=weaviate_url, api_key=weaviate_api_key, ) if batch_size: client.batch.configure(batch_size=batch_size) index_name = index_name or f"LangChain_{uuid4().hex}" schema = _default_schema(index_name, text_key) # check whether the index already exists if not client.schema.exists(index_name): client.schema.create_class(schema) embeddings = embedding.embed_documents(texts) if embedding else None attributes = list(metadatas[0].keys()) if metadatas else None # If the UUID of one of the objects already exists # then the existing object will be replaced by the new object. if "uuids" in kwargs: uuids = kwargs.pop("uuids") else: uuids = [get_valid_uuid(uuid4()) for _ in range(len(texts))] with client.batch as batch: for i, text in enumerate(texts): data_properties = { text_key: text, } if metadatas is not None: for key in metadatas[i].keys(): data_properties[key] = metadatas[i][key] _id = uuids[i] # if an embedding strategy is not provided, we let # weaviate create the embedding. Note that this will only # work if weaviate has been installed with a vectorizer module # like text2vec-contextionary for example params = { "uuid": _id, "data_object": data_properties, "class_name": index_name, } if embeddings is not None: params["vector"] = embeddings[i] batch.add_data_object(**params) batch.flush() return cls( client, index_name, text_key, embedding=embedding, attributes=attributes, relevance_score_fn=relevance_score_fn, by_text=by_text, **kwargs, ) def delete(self, ids: Optional[List[str]] = None, **kwargs: Any) -> None: """Delete by vector IDs. Args: ids: List of ids to delete. """ if ids is None: raise ValueError("No ids provided to delete.") # TODO: Check if this can be done in bulk for id in ids: self._client.data_object.delete(uuid=id)
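

# Illustrative usage sketch: a hedged, minimal example of the wrapper above.
# It assumes a Weaviate server is reachable at the placeholder URL below;
# FakeEmbeddings stands in for a real embedding model, and by_text defaults to
# False in from_texts, so searches run against the vectors we supply.
if __name__ == "__main__":
    from langchain_community.embeddings import FakeEmbeddings

    db = Weaviate.from_texts(
        [
            "weaviate stores objects with vectors",
            "mmr diversifies search results",
        ],
        FakeEmbeddings(size=16),  # placeholder embedding model
        weaviate_url="http://localhost:8080",  # placeholder URL
    )
    # MMR fetches fetch_k candidates, then picks k diverse ones.
    for doc in db.max_marginal_relevance_search("vector objects", k=1, fetch_k=5):
        print(doc.page_content)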
0
lc_public_repos/langchain/libs/community/langchain_community
lc_public_repos/langchain/libs/community/langchain_community/vectorstores/tigris.py
from __future__ import annotations

import itertools
from typing import TYPE_CHECKING, Any, Iterable, List, Optional, Tuple

from langchain_core.documents import Document
from langchain_core.embeddings import Embeddings
from langchain_core.vectorstores import VectorStore

if TYPE_CHECKING:
    from tigrisdb import TigrisClient
    from tigrisdb import VectorStore as TigrisVectorStore
    from tigrisdb.types.filters import Filter as TigrisFilter
    from tigrisdb.types.vector import Document as TigrisDocument


class Tigris(VectorStore):
    """`Tigris` vector store."""

    def __init__(self, client: TigrisClient, embeddings: Embeddings, index_name: str):
        """Initialize Tigris vector store."""
        try:
            import tigrisdb  # noqa: F401

            # Import at runtime as well: the TYPE_CHECKING imports above are
            # not executed, so the name must be bound here before use.
            from tigrisdb import VectorStore as TigrisVectorStore
        except ImportError:
            raise ImportError(
                "Could not import tigrisdb python package. "
                "Please install it with `pip install tigrisdb`"
            )

        self._embed_fn = embeddings
        self._vector_store = TigrisVectorStore(client.get_search(), index_name)

    @property
    def embeddings(self) -> Embeddings:
        return self._embed_fn

    @property
    def search_index(self) -> TigrisVectorStore:
        return self._vector_store

    def add_texts(
        self,
        texts: Iterable[str],
        metadatas: Optional[List[dict]] = None,
        ids: Optional[List[str]] = None,
        **kwargs: Any,
    ) -> List[str]:
        """Run more texts through the embeddings and add to the vectorstore.

        Args:
            texts: Iterable of strings to add to the vectorstore.
            metadatas: Optional list of metadatas associated with the texts.
            ids: Optional list of ids for documents.
                Ids will be autogenerated if not provided.
            kwargs: vectorstore specific parameters

        Returns:
            List of ids from adding the texts into the vectorstore.
        """
        docs = self._prep_docs(texts, metadatas, ids)
        result = self.search_index.add_documents(docs)
        return [r.id for r in result]

    def similarity_search(
        self,
        query: str,
        k: int = 4,
        filter: Optional[TigrisFilter] = None,
        **kwargs: Any,
    ) -> List[Document]:
        """Return docs most similar to query."""
        docs_with_scores = self.similarity_search_with_score(query, k, filter)
        return [doc for doc, _ in docs_with_scores]

    def similarity_search_with_score(
        self,
        query: str,
        k: int = 4,
        filter: Optional[TigrisFilter] = None,
    ) -> List[Tuple[Document, float]]:
        """Run similarity search with Tigris, returning distances.

        Args:
            query (str): Query text to search for.
            k (int): Number of results to return. Defaults to 4.
            filter (Optional[TigrisFilter]): Filter by metadata. Defaults to None.

        Returns:
            List[Tuple[Document, float]]: List of documents most similar to the query
                text with distance in float.
""" vector = self._embed_fn.embed_query(query) result = self.search_index.similarity_search( vector=vector, k=k, filter_by=filter ) docs: List[Tuple[Document, float]] = [] for r in result: docs.append( ( Document( page_content=r.doc["text"], metadata=r.doc.get("metadata") ), r.score, ) ) return docs @classmethod def from_texts( cls, texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, client: Optional[TigrisClient] = None, index_name: Optional[str] = None, **kwargs: Any, ) -> Tigris: """Return VectorStore initialized from texts and embeddings.""" if not index_name: raise ValueError("`index_name` is required") if not client: client = TigrisClient() store = cls(client, embedding, index_name) store.add_texts(texts=texts, metadatas=metadatas, ids=ids) return store def _prep_docs( self, texts: Iterable[str], metadatas: Optional[List[dict]], ids: Optional[List[str]], ) -> List[TigrisDocument]: embeddings: List[List[float]] = self._embed_fn.embed_documents(list(texts)) docs: List[TigrisDocument] = [] for t, m, e, _id in itertools.zip_longest( texts, metadatas or [], embeddings or [], ids or [] ): doc: TigrisDocument = { "text": t, "embeddings": e or [], "metadata": m or {}, } if _id: doc["id"] = _id docs.append(doc) return docs
0
lc_public_repos/langchain/libs/community/langchain_community
lc_public_repos/langchain/libs/community/langchain_community/vectorstores/clickhouse.py
from __future__ import annotations

import json
import logging
from hashlib import sha1
from threading import Thread
from typing import Any, Dict, Iterable, List, Optional, Tuple, Union

from langchain_core.documents import Document
from langchain_core.embeddings import Embeddings
from langchain_core.vectorstores import VectorStore
from pydantic_settings import BaseSettings, SettingsConfigDict

logger = logging.getLogger()


def has_mul_sub_str(s: str, *args: Any) -> bool:
    """
    Check if a string contains multiple substrings.

    Args:
        s: string to check.
        *args: substrings to check.

    Returns:
        True if all substrings are in the string, False otherwise.
    """
    for a in args:
        if a not in s:
            return False
    return True


class ClickhouseSettings(BaseSettings):
    """`ClickHouse` client configuration.

    Attributes:
        host (str) : Hostname or URL of the ClickHouse server to connect to.
                     Defaults to 'localhost'.
        port (int) : Port to connect to over HTTP. Defaults to 8123.
        username (str) : Username to login. Defaults to None.
        password (str) : Password to login. Defaults to None.
        secure (bool) : Connect to server over secure connection. Defaults to False.
        index_type (str): index type string.
        index_param (list): index build parameter.
        index_query_params (dict): index query parameters.
        database (str) : Database name to find the table. Defaults to 'default'.
        table (str) : Table name to operate on.
                      Defaults to 'langchain'.
        metric (str) : Metric to compute distance,
                       supported are ('angular', 'euclidean', 'manhattan', 'hamming',
                       'dot'). Defaults to 'angular'.
                       https://github.com/spotify/annoy/blob/main/src/annoymodule.cc#L149-L169

        column_map (Dict) : Column map to project column names onto langchain
                            semantics. Must have the keys `id`, `uuid`, `document`,
                            `embedding`, and `metadata`, and must match the number
                            of columns. For example:
                            .. code-block:: python

                                {
                                    'id': 'text_id',
                                    'uuid': 'global_unique_id',
                                    'embedding': 'text_embedding',
                                    'document': 'text_plain',
                                    'metadata': 'metadata_dictionary_in_json',
                                }

                            Defaults to identity map.
    """

    host: str = "localhost"
    port: int = 8123

    username: Optional[str] = None
    password: Optional[str] = None

    secure: bool = False

    index_type: Optional[str] = "annoy"
    # Annoy supports L2Distance and cosineDistance.
    index_param: Optional[Union[List, Dict]] = ["'L2Distance'", 100]
    index_query_params: Dict[str, str] = {}

    column_map: Dict[str, str] = {
        "id": "id",
        "uuid": "uuid",
        "document": "document",
        "embedding": "embedding",
        "metadata": "metadata",
    }

    database: str = "default"
    table: str = "langchain"
    metric: str = "angular"

    def __getitem__(self, item: str) -> Any:
        return getattr(self, item)

    model_config = SettingsConfigDict(
        env_file=".env",
        env_file_encoding="utf-8",
        env_prefix="clickhouse_",
        extra="ignore",
    )


class Clickhouse(VectorStore):
    """ClickHouse vector store integration.

    Setup:
        Install ``langchain_community`` and ``clickhouse-connect``:

        .. code-block:: bash

            pip install -qU langchain_community clickhouse-connect

    Key init args — indexing params:
        embedding: Embeddings
            Embedding function to use.

    Key init args — client params:
        config: Optional[ClickhouseSettings]
            ClickHouse client configuration.

    Instantiate:
        .. code-block:: python

            from langchain_community.vectorstores import Clickhouse, ClickhouseSettings
            from langchain_openai import OpenAIEmbeddings

            settings = ClickhouseSettings(table="clickhouse_example")
            vector_store = Clickhouse(embedding=OpenAIEmbeddings(), config=settings)

    Add Documents:
        ..
code-block:: python from langchain_core.documents import Document document_1 = Document(page_content="foo", metadata={"baz": "bar"}) document_2 = Document(page_content="thud", metadata={"bar": "baz"}) document_3 = Document(page_content="i will be deleted :(") documents = [document_1, document_2, document_3] ids = ["1", "2", "3"] vector_store.add_documents(documents=documents, ids=ids) Delete Documents: .. code-block:: python vector_store.delete(ids=["3"]) # TODO: Fill out example output. Search: .. code-block:: python results = vector_store.similarity_search(query="thud",k=1) for doc in results: print(f"* {doc.page_content} [{doc.metadata}]") .. code-block:: python # TODO: Example output # TODO: Fill out with relevant variables and example output. Search with filter: .. code-block:: python # TODO: Edit filter if needed results = vector_store.similarity_search(query="thud",k=1,filter="metadata.baz='bar'") for doc in results: print(f"* {doc.page_content} [{doc.metadata}]") .. code-block:: python # TODO: Example output # TODO: Fill out with example output. Search with score: .. code-block:: python results = vector_store.similarity_search_with_score(query="qux",k=1) for doc, score in results: print(f"* [SIM={score:3f}] {doc.page_content} [{doc.metadata}]") .. code-block:: python # TODO: Example output # TODO: Fill out with example output. Async: .. code-block:: python # add documents # await vector_store.aadd_documents(documents=documents, ids=ids) # delete documents # await vector_store.adelete(ids=["3"]) # search # results = vector_store.asimilarity_search(query="thud",k=1) # search with score results = await vector_store.asimilarity_search_with_score(query="qux",k=1) for doc,score in results: print(f"* [SIM={score:3f}] {doc.page_content} [{doc.metadata}]") .. code-block:: python # TODO: Example output # TODO: Fill out with example output. Use as Retriever: .. code-block:: python retriever = vector_store.as_retriever( search_type="mmr", search_kwargs={"k": 1, "fetch_k": 2, "lambda_mult": 0.5}, ) retriever.invoke("thud") .. code-block:: python # TODO: Example output """ # noqa: E501 def __init__( self, embedding: Embeddings, config: Optional[ClickhouseSettings] = None, **kwargs: Any, ) -> None: """ClickHouse Wrapper to LangChain Args: embedding_function (Embeddings): embedding function to use config (ClickHouseSettings): Configuration to ClickHouse Client kwargs (any): Other keyword arguments will pass into [clickhouse-connect](https://docs.clickhouse.com/) """ try: from clickhouse_connect import get_client except ImportError: raise ImportError( "Could not import clickhouse connect python package. " "Please install it with `pip install clickhouse-connect`." 
) try: from tqdm import tqdm self.pgbar = tqdm except ImportError: # Just in case if tqdm is not installed self.pgbar = lambda x, **kwargs: x super().__init__() if config is not None: self.config = config else: self.config = ClickhouseSettings() assert self.config assert self.config.host and self.config.port assert ( self.config.column_map and self.config.database and self.config.table and self.config.metric ) for k in ["id", "embedding", "document", "metadata", "uuid"]: assert k in self.config.column_map assert self.config.metric in [ "angular", "euclidean", "manhattan", "hamming", "dot", ] # initialize the schema dim = len(embedding.embed_query("test")) index_params = ( ( ",".join([f"'{k}={v}'" for k, v in self.config.index_param.items()]) if self.config.index_param else "" ) if isinstance(self.config.index_param, Dict) else ( ",".join([str(p) for p in self.config.index_param]) if isinstance(self.config.index_param, List) else self.config.index_param ) ) self.schema = self._schema(dim, index_params) self.dim = dim self.BS = "\\" self.must_escape = ("\\", "'") self.embedding_function = embedding self.dist_order = "ASC" # Only support ConsingDistance and L2Distance # Create a connection to clickhouse self.client = get_client( host=self.config.host, port=self.config.port, username=self.config.username, password=self.config.password, secure=self.config.secure, **kwargs, ) # Enable JSON type try: self.client.command("SET allow_experimental_json_type=1") except Exception as _: logger.debug( f"Clickhouse version={self.client.server_version} - " "There is no allow_experimental_json_type parameter." ) self.client.command("SET allow_experimental_object_type=1") if self.config.index_type: # Enable index self.client.command( f"SET allow_experimental_{self.config.index_type}_index=1" ) self.client.command(self.schema) def _schema(self, dim: int, index_params: Optional[str] = "") -> str: """Create table schema :param dim: dimension of embeddings :param index_params: parameters used for index This function returns a `CREATE TABLE` statement based on the value of `self.config.index_type`. If an index type is specified that index will be created, otherwise no index will be created. In the case of there being no index, a linear scan will be performed when the embedding field is queried. 
""" if self.config.index_type: return f"""\ CREATE TABLE IF NOT EXISTS {self.config.database}.{self.config.table}( {self.config.column_map['id']} Nullable(String), {self.config.column_map['document']} Nullable(String), {self.config.column_map['embedding']} Array(Float32), {self.config.column_map['metadata']} JSON, {self.config.column_map['uuid']} UUID DEFAULT generateUUIDv4(), CONSTRAINT cons_vec_len CHECK length( {self.config.column_map['embedding']}) = {dim}, INDEX vec_idx {self.config.column_map['embedding']} TYPE \ {self.config.index_type}({index_params}) GRANULARITY 1000 ) ENGINE = MergeTree ORDER BY uuid SETTINGS index_granularity = 8192\ """ else: return f"""\ CREATE TABLE IF NOT EXISTS {self.config.database}.{self.config.table}( {self.config.column_map['id']} Nullable(String), {self.config.column_map['document']} Nullable(String), {self.config.column_map['embedding']} Array(Float32), {self.config.column_map['metadata']} JSON, {self.config.column_map['uuid']} UUID DEFAULT generateUUIDv4(), CONSTRAINT cons_vec_len CHECK length({ self.config.column_map['embedding']}) = {dim} ) ENGINE = MergeTree ORDER BY uuid """ @property def embeddings(self) -> Embeddings: """Provides access to the embedding mechanism used by the Clickhouse instance. This property allows direct access to the embedding function or model being used by the Clickhouse instance to convert text documents into embedding vectors for vector similarity search. Returns: The `Embeddings` instance associated with this Clickhouse instance. """ return self.embedding_function def escape_str(self, value: str) -> str: """Escape special characters in a string for Clickhouse SQL queries. This method is used internally to prepare strings for safe insertion into SQL queries by escaping special characters that might otherwise interfere with the query syntax. Args: value: The string to be escaped. Returns: The escaped string, safe for insertion into SQL queries. """ return "".join(f"{self.BS}{c}" if c in self.must_escape else c for c in value) def _build_insert_sql(self, transac: Iterable, column_names: Iterable[str]) -> str: """Construct an SQL query for inserting data into the Clickhouse database. This method formats and constructs an SQL `INSERT` query string using the provided transaction data and column names. It is utilized internally during the process of batch insertion of documents and their embeddings into the database. Args: transac: iterable of tuples, representing a row of data to be inserted. column_names: iterable of strings representing the names of the columns into which data will be inserted. Returns: A string containing the constructed SQL `INSERT` query. """ ks = ",".join(column_names) _data = [] for n in transac: n = ",".join([f"'{self.escape_str(str(_n))}'" for _n in n]) _data.append(f"({n})") i_str = f""" INSERT INTO TABLE {self.config.database}.{self.config.table}({ks}) VALUES {','.join(_data)} """ return i_str def _insert(self, transac: Iterable, column_names: Iterable[str]) -> None: """Execute an SQL query to insert data into the Clickhouse database. This method performs the actual insertion of data into the database by executing the SQL query constructed by `_build_insert_sql`. It's a critical step in adding new documents and their associated data into the vector store. Args: transac:iterable of tuples, representing a row of data to be inserted. column_names: An iterable of strings representing the names of the columns into which data will be inserted. 
""" _insert_query = self._build_insert_sql(transac, column_names) self.client.command(_insert_query) def add_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, batch_size: int = 32, ids: Optional[Iterable[str]] = None, **kwargs: Any, ) -> List[str]: """Insert more texts through the embeddings and add to the VectorStore. Args: texts: Iterable of strings to add to the VectorStore. ids: Optional list of ids to associate with the texts. batch_size: Batch size of insertion metadata: Optional column data to be inserted Returns: List of ids from adding the texts into the VectorStore. """ # Embed and create the documents ids = ids or [sha1(t.encode("utf-8")).hexdigest() for t in texts] colmap_ = self.config.column_map transac = [] column_names = { colmap_["id"]: ids, colmap_["document"]: texts, colmap_["embedding"]: self.embedding_function.embed_documents(list(texts)), } metadatas = metadatas or [{} for _ in texts] column_names[colmap_["metadata"]] = map(json.dumps, metadatas) assert len(set(colmap_) - set(column_names)) >= 0 keys, values = zip(*column_names.items()) try: t = None for v in self.pgbar( zip(*values), desc="Inserting data...", total=len(metadatas) ): assert ( len(v[keys.index(self.config.column_map["embedding"])]) == self.dim ) transac.append(v) if len(transac) == batch_size: if t: t.join() t = Thread(target=self._insert, args=[transac, keys]) t.start() transac = [] if len(transac) > 0: if t: t.join() self._insert(transac, keys) return [i for i in ids] except Exception as e: logger.error(f"\033[91m\033[1m{type(e)}\033[0m \033[95m{str(e)}\033[0m") return [] @classmethod def from_texts( cls, texts: List[str], embedding: Embeddings, metadatas: Optional[List[Dict[Any, Any]]] = None, config: Optional[ClickhouseSettings] = None, text_ids: Optional[Iterable[str]] = None, batch_size: int = 32, **kwargs: Any, ) -> Clickhouse: """Create ClickHouse wrapper with existing texts Args: embedding_function (Embeddings): Function to extract text embedding texts (Iterable[str]): List or tuple of strings to be added config (ClickHouseSettings, Optional): ClickHouse configuration text_ids (Optional[Iterable], optional): IDs for the texts. Defaults to None. batch_size (int, optional): Batchsize when transmitting data to ClickHouse. Defaults to 32. metadata (List[dict], optional): metadata to texts. Defaults to None. Other keyword arguments will pass into [clickhouse-connect](https://clickhouse.com/docs/en/integrations/python#clickhouse-connect-driver-api) Returns: ClickHouse Index """ ctx = cls(embedding, config, **kwargs) ctx.add_texts(texts, ids=text_ids, batch_size=batch_size, metadatas=metadatas) return ctx def __repr__(self) -> str: """Text representation for ClickHouse Vector Store, prints backends, username and schemas. Easy to use with `str(ClickHouse())` Returns: repr: string to show connection info and data schema """ _repr = f"\033[92m\033[1m{self.config.database}.{self.config.table} @ " _repr += f"{self.config.host}:{self.config.port}\033[0m\n\n" _repr += f"\033[1musername: {self.config.username}\033[0m\n\nTable Schema:\n" _repr += "-" * 51 + "\n" for r in self.client.query( f"DESC {self.config.database}.{self.config.table}" ).named_results(): _repr += ( f"|\033[94m{r['name']:24s}\033[0m|\033[96m{r['type']:24s}\033[0m|\n" ) _repr += "-" * 51 + "\n" return _repr def _build_query_sql( self, q_emb: List[float], topk: int, where_str: Optional[str] = None ) -> str: """Construct an SQL query for performing a similarity search. 
This internal method generates an SQL query for finding the top-k most similar vectors in the database to a given query vector.It allows for optional filtering conditions to be applied via a WHERE clause. Args: q_emb: The query vector as a list of floats. topk: The number of top similar items to retrieve. where_str: opt str representing additional WHERE conditions for the query Defaults to None. Returns: A string containing the SQL query for the similarity search. """ q_emb_str = ",".join(map(str, q_emb)) if where_str: where_str = f"PREWHERE {where_str}" else: where_str = "" settings_strs = [] if self.config.index_query_params: for k in self.config.index_query_params: settings_strs.append(f"SETTING {k}={self.config.index_query_params[k]}") q_str = f""" SELECT {self.config.column_map['document']}, {self.config.column_map['metadata']}, dist FROM {self.config.database}.{self.config.table} {where_str} ORDER BY L2Distance({self.config.column_map['embedding']}, [{q_emb_str}]) AS dist {self.dist_order} LIMIT {topk} {' '.join(settings_strs)} """ return q_str def similarity_search( self, query: str, k: int = 4, where_str: Optional[str] = None, **kwargs: Any ) -> List[Document]: """Perform a similarity search with ClickHouse Args: query (str): query string k (int, optional): Top K neighbors to retrieve. Defaults to 4. where_str (Optional[str], optional): where condition string. Defaults to None. NOTE: Please do not let end-user to fill this and always be aware of SQL injection. When dealing with metadatas, remember to use `{self.metadata_column}.attribute` instead of `attribute` alone. The default name for it is `metadata`. Returns: List[Document]: List of Documents """ return self.similarity_search_by_vector( self.embedding_function.embed_query(query), k, where_str, **kwargs ) def similarity_search_by_vector( self, embedding: List[float], k: int = 4, where_str: Optional[str] = None, **kwargs: Any, ) -> List[Document]: """Perform a similarity search with ClickHouse by vectors Args: query (str): query string k (int, optional): Top K neighbors to retrieve. Defaults to 4. where_str (Optional[str], optional): where condition string. Defaults to None. NOTE: Please do not let end-user to fill this and always be aware of SQL injection. When dealing with metadatas, remember to use `{self.metadata_column}.attribute` instead of `attribute` alone. The default name for it is `metadata`. Returns: List[Document]: List of documents """ q_str = self._build_query_sql(embedding, k, where_str) try: return [ Document( page_content=r[self.config.column_map["document"]], metadata=r[self.config.column_map["metadata"]], ) for r in self.client.query(q_str).named_results() ] except Exception as e: logger.error(f"\033[91m\033[1m{type(e)}\033[0m \033[95m{str(e)}\033[0m") return [] def similarity_search_with_relevance_scores( self, query: str, k: int = 4, where_str: Optional[str] = None, **kwargs: Any ) -> List[Tuple[Document, float]]: """Perform a similarity search with ClickHouse Args: query (str): query string k (int, optional): Top K neighbors to retrieve. Defaults to 4. where_str (Optional[str], optional): where condition string. Defaults to None. NOTE: Please do not let end-user to fill this and always be aware of SQL injection. When dealing with metadatas, remember to use `{self.metadata_column}.attribute` instead of `attribute` alone. The default name for it is `metadata`. 
        Returns:
            List[Tuple[Document, float]]: List of (Document, score) pairs, where
                the score is the L2 distance computed by ClickHouse
                (lower means more similar).
        """
        q_str = self._build_query_sql(
            self.embedding_function.embed_query(query), k, where_str
        )
        try:
            return [
                (
                    Document(
                        page_content=r[self.config.column_map["document"]],
                        metadata=r[self.config.column_map["metadata"]],
                    ),
                    r["dist"],
                )
                for r in self.client.query(q_str).named_results()
            ]
        except Exception as e:
            logger.error(f"\033[91m\033[1m{type(e)}\033[0m \033[95m{str(e)}\033[0m")
            return []

    def drop(self) -> None:
        """
        Helper function: drop the table backing this vector store.
        """
        self.client.command(
            f"DROP TABLE IF EXISTS {self.config.database}.{self.config.table}"
        )

    @property
    def metadata_column(self) -> str:
        return self.config.column_map["metadata"]
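

# Illustrative usage sketch: a hedged, minimal example of the store above. It
# assumes a local ClickHouse server on the default HTTP port (8123); the table
# name and FakeEmbeddings stand-in are placeholders. Note that `where_str` in
# the search methods is interpolated into SQL verbatim, so it must never come
# from untrusted input.
if __name__ == "__main__":
    from langchain_community.embeddings import FakeEmbeddings

    store = Clickhouse(
        embedding=FakeEmbeddings(size=16),  # placeholder embedding model
        config=ClickhouseSettings(table="clickhouse_demo"),  # placeholder table
    )
    store.add_texts(["clickhouse answers vector queries with an annoy index"])
    # Scores returned here are L2 distances: lower means more similar.
    for doc, dist in store.similarity_search_with_relevance_scores(
        "annoy index", k=1
    ):
        print(dist, doc.page_content)
    store.drop()  # clean up the demo table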
0
lc_public_repos/langchain/libs/community/langchain_community
lc_public_repos/langchain/libs/community/langchain_community/vectorstores/alibabacloud_opensearch.py
import json
import logging
import numbers
from hashlib import sha1
from typing import Any, Dict, Iterable, List, Optional, Tuple

from langchain_core.documents import Document
from langchain_core.embeddings import Embeddings
from langchain_core.vectorstores import VectorStore

logger = logging.getLogger()


class AlibabaCloudOpenSearchSettings:
    """`Alibaba Cloud OpenSearch` client configuration.

    Attributes:
        endpoint (str) : The endpoint of the opensearch instance. You can find it
         in the console of Alibaba Cloud OpenSearch.
        instance_id (str) : The identifier of the opensearch instance. You can
         find it in the console of Alibaba Cloud OpenSearch.
        username (str) : The username specified when purchasing the instance.
        password (str) : The password specified when purchasing the instance.
         After the instance is created, you can modify it in the console.
        table_name (str): The table name specified during instance configuration.
        field_name_mapping (Dict) : Field name mapping between the opensearch
         vector store and the field names configured in the opensearch instance
         table:
        {
            'id': 'The id field name map of index document.',
            'document': 'The text field name map of index document.',
            'embedding': 'In the embedding field of the opensearch instance,
             the values must be in float type and separated by separator,
             default is comma.',
            'metadata_field_x': 'Metadata field mapping includes the mapped
             field name and operator in the mapping value, separated by a comma
             between the mapped field name and the operator.',
        }
        protocol (str): Communication protocol between the SDK and server,
         default is http.
        namespace (str) : The instance data will be partitioned based on the
         "namespace" field. If the namespace is enabled, you need to specify the
         namespace field name during initialization; otherwise, the queries
         cannot be executed correctly.
        embedding_field_separator (str): Delimiter specified for writing vector
         field data, default is comma.
        output_fields: The list of fields returned when invoking OpenSearch;
         by default it is the list of mapped field names from
         `field_name_mapping`.
    """

    def __init__(
        self,
        endpoint: str,
        instance_id: str,
        username: str,
        password: str,
        table_name: str,
        field_name_mapping: Dict[str, str],
        protocol: str = "http",
        namespace: str = "",
        embedding_field_separator: str = ",",
        output_fields: Optional[List[str]] = None,
    ) -> None:
        self.endpoint = endpoint
        self.instance_id = instance_id
        self.protocol = protocol
        self.username = username
        self.password = password
        self.namespace = namespace
        self.table_name = table_name
        self.opt_table_name = "_".join([self.instance_id, self.table_name])
        self.field_name_mapping = field_name_mapping
        self.embedding_field_separator = embedding_field_separator
        if output_fields is None:
            self.output_fields = [
                field.split(",")[0] for field in self.field_name_mapping.values()
            ]
        else:
            # Honor an explicitly provided field list instead of silently
            # leaving the attribute unset.
            self.output_fields = output_fields
        self.inverse_field_name_mapping: Dict[str, str] = {}
        for key, value in self.field_name_mapping.items():
            self.inverse_field_name_mapping[value.split(",")[0]] = key

    def __getitem__(self, item: str) -> Any:
        return getattr(self, item)


def create_metadata(fields: Dict[str, Any]) -> Dict[str, Any]:
    """Create metadata from fields.

    Args:
        fields: The fields of the document. The fields must be a dict.

    Returns:
        metadata: The metadata of the document. The metadata must be a dict.
""" metadata: Dict[str, Any] = {} for key, value in fields.items(): if key == "id" or key == "document" or key == "embedding": continue metadata[key] = value return metadata class AlibabaCloudOpenSearch(VectorStore): """`Alibaba Cloud OpenSearch` vector store.""" def __init__( self, embedding: Embeddings, config: AlibabaCloudOpenSearchSettings, **kwargs: Any, ) -> None: try: from alibabacloud_ha3engine_vector import client, models from alibabacloud_tea_util import models as util_models except ImportError: raise ImportError( "Could not import alibaba cloud opensearch python package. " "Please install it with `pip install alibabacloud-ha3engine-vector`." ) self.config = config self.embedding = embedding self.runtime = util_models.RuntimeOptions( connect_timeout=5000, read_timeout=10000, autoretry=False, ignore_ssl=False, max_idle_conns=50, ) self.ha3_engine_client = client.Client( models.Config( endpoint=config.endpoint, instance_id=config.instance_id, protocol=config.protocol, access_user_name=config.username, access_pass_word=config.password, ) ) self.options_headers: Dict[str, str] = {} def add_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, **kwargs: Any, ) -> List[str]: """Insert documents into the instance.. Args: texts: The text segments to be inserted into the vector storage, should not be empty. metadatas: Metadata information. Returns: id_list: List of document IDs. """ def _upsert(push_doc_list: List[Dict]) -> List[str]: if push_doc_list is None or len(push_doc_list) == 0: return [] try: push_request = models.PushDocumentsRequest( self.options_headers, push_doc_list ) push_response = self.ha3_engine_client.push_documents( self.config.opt_table_name, field_name_map["id"], push_request ) json_response = json.loads(push_response.body) if json_response["status"] == "OK": return [ push_doc["fields"][field_name_map["id"]] for push_doc in push_doc_list ] return [] except Exception as e: logger.error( f"add doc to endpoint:{self.config.endpoint} " f"instance_id:{self.config.instance_id} failed.", e, ) raise e from alibabacloud_ha3engine_vector import models id_list = [sha1(t.encode("utf-8")).hexdigest() for t in texts] embeddings = self.embedding.embed_documents(list(texts)) metadatas = metadatas or [{} for _ in texts] field_name_map = self.config.field_name_mapping add_doc_list = [] text_list = list(texts) for idx, doc_id in enumerate(id_list): embedding = embeddings[idx] if idx < len(embeddings) else None metadata = metadatas[idx] if idx < len(metadatas) else None text = text_list[idx] if idx < len(text_list) else None add_doc: Dict[str, Any] = dict() add_doc_fields: Dict[str, Any] = dict() add_doc_fields.__setitem__(field_name_map["id"], doc_id) add_doc_fields.__setitem__(field_name_map["document"], text) if embedding is not None: add_doc_fields.__setitem__( field_name_map["embedding"], self.config.embedding_field_separator.join( str(unit) for unit in embedding ), ) if metadata is not None: for md_key, md_value in metadata.items(): add_doc_fields.__setitem__( field_name_map[md_key].split(",")[0], md_value ) add_doc.__setitem__("fields", add_doc_fields) add_doc.__setitem__("cmd", "add") add_doc_list.append(add_doc) return _upsert(add_doc_list) def similarity_search( self, query: str, k: int = 4, search_filter: Optional[Dict[str, Any]] = None, **kwargs: Any, ) -> List[Document]: """Perform similarity retrieval based on text. Args: query: Vectorize text for retrieval.,should not be empty. k: top n. search_filter: Additional filtering conditions. 
Returns:
            document_list: List of documents.
        """
        embedding = self.embedding.embed_query(query)
        return self.create_results(
            self.inner_embedding_query(
                embedding=embedding, search_filter=search_filter, k=k
            )
        )

    def similarity_search_with_relevance_scores(
        self,
        query: str,
        k: int = 4,
        search_filter: Optional[dict] = None,
        **kwargs: Any,
    ) -> List[Tuple[Document, float]]:
        """Perform similarity retrieval based on text, returning scores.

        Args:
            query: Text to vectorize for retrieval; should not be empty.
            k: top n.
            search_filter: Additional filtering conditions.

        Returns:
            document_list: List of documents with scores.
        """
        embedding: List[float] = self.embedding.embed_query(query)
        return self.create_results_with_score(
            self.inner_embedding_query(
                embedding=embedding, search_filter=search_filter, k=k
            )
        )

    def similarity_search_by_vector(
        self,
        embedding: List[float],
        k: int = 4,
        search_filter: Optional[dict] = None,
        **kwargs: Any,
    ) -> List[Document]:
        """Perform retrieval directly using vectors.

        Args:
            embedding: The query vector.
            k: top n.
            search_filter: Additional filtering conditions.

        Returns:
            document_list: List of documents.
        """
        return self.create_results(
            self.inner_embedding_query(
                embedding=embedding, search_filter=search_filter, k=k
            )
        )

    def inner_embedding_query(
        self,
        embedding: List[float],
        search_filter: Optional[Dict[str, Any]] = None,
        k: int = 4,
    ) -> Dict[str, Any]:
        from alibabacloud_ha3engine_vector.models import QueryRequest

        def generate_filter_query() -> str:
            if search_filter is None:
                return ""
            filter_clause = " AND ".join(
                [
                    create_filter(md_key, md_value)
                    for md_key, md_value in search_filter.items()
                ]
            )
            return filter_clause

        def create_filter(md_key: str, md_value: Any) -> str:
            md_filter_expr = self.config.field_name_mapping[md_key]
            if md_filter_expr is None:
                return ""
            expr = md_filter_expr.split(",")
            if len(expr) != 2:
                logger.error(
                    f"filter {md_filter_expr} expression is not correct, "
                    f"it must contain a mapping field and an operator."
                )
                return ""
            md_filter_key = expr[0].strip()
            md_filter_operator = expr[1].strip()
            if isinstance(md_value, numbers.Number):
                return f"{md_filter_key} {md_filter_operator} {md_value}"
            return f'{md_filter_key}{md_filter_operator}"{md_value}"'

        def search_data() -> Dict[str, Any]:
            request = QueryRequest(
                table_name=self.config.table_name,
                namespace=self.config.namespace,
                vector=embedding,
                include_vector=True,
                output_fields=self.config.output_fields,
                filter=generate_filter_query(),
                top_k=k,
            )
            query_result = self.ha3_engine_client.query(request)
            return json.loads(query_result.body)

        try:
            json_response = search_data()
            if (
                "errorCode" in json_response
                and "errorMsg" in json_response
                and len(json_response["errorMsg"]) > 0
            ):
                logger.error(
                    f"query {self.config.endpoint} {self.config.instance_id} "
                    f"failed:{json_response['errorMsg']}."
                )
            else:
                return json_response
        except Exception as e:
            logger.error(
                f"query instance endpoint:{self.config.endpoint} "
                f"instance_id:{self.config.instance_id} failed.",
                e,
            )
        return {}

    def create_results(self, json_result: Dict[str, Any]) -> List[Document]:
        """Assemble documents from a query result."""
        items = json_result["result"]
        query_result_list: List[Document] = []
        for item in items:
            if (
                "fields" not in item
                or self.config.field_name_mapping["document"] not in item["fields"]
            ):
                # No document field was returned; keep a placeholder entry.
                query_result_list.append(Document())  # type: ignore[call-arg]
            else:
                fields = item["fields"]
                query_result_list.append(
                    Document(
                        page_content=fields[
                            self.config.field_name_mapping["document"]
                        ],
                        metadata=self.create_inverse_metadata(fields),
                    )
                )
        return query_result_list

    def create_inverse_metadata(self, fields: Dict[str, Any]) -> Dict[str, Any]:
        """Create metadata from fields.

        Args:
            fields: The fields of the document. The fields must be a dict.

        Returns:
            metadata: The metadata of the document. The metadata must be a dict.
        """
        metadata: Dict[str, Any] = {}
        for key, value in fields.items():
            if key == "id" or key == "document" or key == "embedding":
                continue
            metadata[self.config.inverse_field_name_mapping[key]] = value
        return metadata

    def create_results_with_score(
        self, json_result: Dict[str, Any]
    ) -> List[Tuple[Document, float]]:
        """Parse the returned results together with their scores.

        Args:
            json_result: Results from the OpenSearch query.

        Returns:
            query_result_list: Results with scores.
        """
        items = json_result["result"]
        query_result_list: List[Tuple[Document, float]] = []
        for item in items:
            fields = item["fields"]
            query_result_list.append(
                (
                    Document(
                        page_content=fields[
                            self.config.field_name_mapping["document"]
                        ],
                        metadata=self.create_inverse_metadata(fields),
                    ),
                    float(item["score"]),
                )
            )
        return query_result_list

    def delete_documents_with_texts(self, texts: List[str]) -> bool:
        """Delete documents based on their page content.

        Args:
            texts: List of document page content.

        Returns:
            Whether the deletion was successful or not.
        """
        id_list = [sha1(t.encode("utf-8")).hexdigest() for t in texts]
        return self.delete_documents_with_document_id(id_list)

    def delete_documents_with_document_id(self, id_list: List[str]) -> bool:
        """Delete documents based on their IDs.

        Args:
            id_list: List of document IDs.

        Returns:
            Whether the deletion was successful or not.
        """
        if id_list is None or len(id_list) == 0:
            return True

        from alibabacloud_ha3engine_vector import models

        delete_doc_list = []
        for doc_id in id_list:
            delete_doc_list.append(
                {
                    "fields": {self.config.field_name_mapping["id"]: doc_id},
                    "cmd": "delete",
                }
            )
        delete_request = models.PushDocumentsRequest(
            self.options_headers, delete_doc_list
        )
        try:
            delete_response = self.ha3_engine_client.push_documents(
                self.config.opt_table_name,
                self.config.field_name_mapping["id"],
                delete_request,
            )
            json_response = json.loads(delete_response.body)
            return json_response["status"] == "OK"
        except Exception as e:
            logger.error(
                f"delete doc from: {self.config.endpoint} "
                f"instance_id: {self.config.instance_id} failed.",
                e,
            )
            raise e

    @classmethod
    def from_texts(
        cls,
        texts: List[str],
        embedding: Embeddings,
        metadatas: Optional[List[dict]] = None,
        config: Optional[AlibabaCloudOpenSearchSettings] = None,
        **kwargs: Any,
    ) -> "AlibabaCloudOpenSearch":
        """Create an Alibaba Cloud OpenSearch vector store instance.

        Args:
            texts: The text segments to be inserted into the vector storage;
                should not be empty.
            embedding: Embedding function.
            config: Alibaba Cloud OpenSearch instance configuration.
            metadatas: Metadata information.

        Returns:
            AlibabaCloudOpenSearch: Alibaba Cloud OpenSearch vector store instance.
        """
        if texts is None or len(texts) == 0:
            raise Exception("the inserted text segments should not be empty.")
        if embedding is None:
            raise Exception("the embeddings should not be empty.")
        if config is None:
            raise Exception("config should not be none.")
        ctx = cls(embedding, config, **kwargs)
        ctx.add_texts(texts=texts, metadatas=metadatas)
        return ctx

    @classmethod
    def from_documents(
        cls,
        documents: List[Document],
        embedding: Embeddings,
        config: Optional[AlibabaCloudOpenSearchSettings] = None,
        **kwargs: Any,
    ) -> "AlibabaCloudOpenSearch":
        """Create an Alibaba Cloud OpenSearch vector store instance.

        Args:
            documents: Documents to be inserted into the vector storage;
                should not be empty.
            embedding: Embedding function.
            config: Alibaba Cloud OpenSearch instance configuration.

        Returns:
            AlibabaCloudOpenSearch: Alibaba Cloud OpenSearch vector store instance.
        """
        if documents is None or len(documents) == 0:
            raise Exception("the inserted documents should not be empty.")
        if embedding is None:
            raise Exception("the embeddings should not be empty.")
        if config is None:
            raise Exception("config should not be none.")
        texts = [d.page_content for d in documents]
        metadatas = [d.metadata for d in documents]
        return cls.from_texts(
            texts=texts,
            embedding=embedding,
            metadatas=metadatas,
            config=config,
            **kwargs,
        )
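

# A minimal, hedged usage sketch of the store defined above. `build_settings`
# is a hypothetical helper standing in for a fully populated
# AlibabaCloudOpenSearchSettings (endpoint, credentials, table name and
# field_name_mapping are deployment-specific), and FakeEmbeddings is used only
# to keep the snippet self-contained; a real run needs a live instance.
if __name__ == "__main__":
    from langchain_community.embeddings import FakeEmbeddings

    def build_settings() -> AlibabaCloudOpenSearchSettings:
        # Hypothetical: populate with your instance's endpoint, instance_id,
        # table name and field_name_mapping before running.
        raise NotImplementedError("configure for your own OpenSearch instance")

    store = AlibabaCloudOpenSearch.from_texts(
        texts=["hello world", "goodbye world"],
        embedding=FakeEmbeddings(size=4),
        metadatas=[{"source": "a"}, {"source": "b"}],
        config=build_settings(),
    )
    # `search_filter` keys are mapped through config.field_name_mapping, as
    # implemented in inner_embedding_query above.
    for doc in store.similarity_search("hello", k=2, search_filter={"source": "a"}):
        print(doc.page_content, doc.metadata)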
0
lc_public_repos/langchain/libs/community/langchain_community
lc_public_repos/langchain/libs/community/langchain_community/vectorstores/neo4j_vector.py
from __future__ import annotations import enum import logging import os from hashlib import md5 from typing import ( Any, Callable, Dict, Iterable, List, Optional, Tuple, Type, ) import numpy as np from langchain_core._api.deprecation import deprecated from langchain_core.documents import Document from langchain_core.embeddings import Embeddings from langchain_core.utils import get_from_dict_or_env from langchain_core.vectorstores import VectorStore from langchain_community.graphs import Neo4jGraph from langchain_community.vectorstores.utils import ( DistanceStrategy, maximal_marginal_relevance, ) DEFAULT_DISTANCE_STRATEGY = DistanceStrategy.COSINE DISTANCE_MAPPING = { DistanceStrategy.EUCLIDEAN_DISTANCE: "euclidean", DistanceStrategy.COSINE: "cosine", } COMPARISONS_TO_NATIVE = { "$eq": "=", "$ne": "<>", "$lt": "<", "$lte": "<=", "$gt": ">", "$gte": ">=", } SPECIAL_CASED_OPERATORS = { "$in", "$nin", "$between", } TEXT_OPERATORS = { "$like", "$ilike", } LOGICAL_OPERATORS = {"$and", "$or"} SUPPORTED_OPERATORS = ( set(COMPARISONS_TO_NATIVE) .union(TEXT_OPERATORS) .union(LOGICAL_OPERATORS) .union(SPECIAL_CASED_OPERATORS) ) @deprecated( since="0.3.8", removal="1.0", alternative_import="langchain_neo4j.vectorstores.neo4j_vector.SearchType", ) class SearchType(str, enum.Enum): """Enumerator of the Distance strategies.""" VECTOR = "vector" HYBRID = "hybrid" DEFAULT_SEARCH_TYPE = SearchType.VECTOR @deprecated( since="0.3.8", removal="1.0", alternative_import="langchain_neo4j.vectorstores.neo4j_vector.IndexType", ) class IndexType(str, enum.Enum): """Enumerator of the index types.""" NODE = "NODE" RELATIONSHIP = "RELATIONSHIP" DEFAULT_INDEX_TYPE = IndexType.NODE @deprecated( since="0.3.8", removal="1.0", alternative_import="langchain_neo4j.vectorstores.neo4j_vector._get_search_index_query", ) def _get_search_index_query( search_type: SearchType, index_type: IndexType = DEFAULT_INDEX_TYPE ) -> str: if index_type == IndexType.NODE: type_to_query_map = { SearchType.VECTOR: ( "CALL db.index.vector.queryNodes($index, $k, $embedding) " "YIELD node, score " ), SearchType.HYBRID: ( "CALL { " "CALL db.index.vector.queryNodes($index, $k, $embedding) " "YIELD node, score " "WITH collect({node:node, score:score}) AS nodes, max(score) AS max " "UNWIND nodes AS n " # We use 0 as min "RETURN n.node AS node, (n.score / max) AS score UNION " "CALL db.index.fulltext.queryNodes($keyword_index, $query, " "{limit: $k}) YIELD node, score " "WITH collect({node:node, score:score}) AS nodes, max(score) AS max " "UNWIND nodes AS n " # We use 0 as min "RETURN n.node AS node, (n.score / max) AS score " "} " # dedup "WITH node, max(score) AS score ORDER BY score DESC LIMIT $k " ), } return type_to_query_map[search_type] else: return ( "CALL db.index.vector.queryRelationships($index, $k, $embedding) " "YIELD relationship, score " ) @deprecated( since="0.3.8", removal="1.0", alternative_import="langchain_neo4j.vectorstores.neo4j_vector.check_if_not_null", ) def check_if_not_null(props: List[str], values: List[Any]) -> None: """Check if the values are not None or empty string""" for prop, value in zip(props, values): if not value: raise ValueError(f"Parameter `{prop}` must not be None or empty string") @deprecated( since="0.3.8", removal="1.0", alternative_import="langchain_neo4j.vectorstores.neo4j_vector.sort_by_index_name", ) def sort_by_index_name( lst: List[Dict[str, Any]], index_name: str ) -> List[Dict[str, Any]]: """Sort first element to match the index_name if exists""" return sorted(lst, key=lambda x: x.get("name") != 
index_name) @deprecated( since="0.3.8", removal="1.0", alternative_import="langchain_neo4j.vectorstores.neo4j_vector.remove_lucene_chars", ) def remove_lucene_chars(text: str) -> str: """Remove Lucene special characters""" special_chars = [ "+", "-", "&", "|", "!", "(", ")", "{", "}", "[", "]", "^", '"', "~", "*", "?", ":", "\\", ] for char in special_chars: if char in text: text = text.replace(char, " ") return text.strip() @deprecated( since="0.3.8", removal="1.0", alternative_import="langchain_neo4j.vectorstores.neo4j_vector.dict_to_yaml_str", ) def dict_to_yaml_str(input_dict: Dict, indent: int = 0) -> str: """ Convert a dictionary to a YAML-like string without using external libraries. Parameters: - input_dict (dict): The dictionary to convert. - indent (int): The current indentation level. Returns: - str: The YAML-like string representation of the input dictionary. """ yaml_str = "" for key, value in input_dict.items(): padding = " " * indent if isinstance(value, dict): yaml_str += f"{padding}{key}:\n{dict_to_yaml_str(value, indent + 1)}" elif isinstance(value, list): yaml_str += f"{padding}{key}:\n" for item in value: yaml_str += f"{padding}- {item}\n" else: yaml_str += f"{padding}{key}: {value}\n" return yaml_str @deprecated( since="0.3.8", removal="1.0", alternative_import="langchain_neo4j.vectorstores.neo4j_vector.combine_queries", ) def combine_queries( input_queries: List[Tuple[str, Dict[str, Any]]], operator: str ) -> Tuple[str, Dict[str, Any]]: """Combine multiple queries with an operator.""" # Initialize variables to hold the combined query and parameters combined_query: str = "" combined_params: Dict = {} param_counter: Dict = {} for query, params in input_queries: # Process each query fragment and its parameters new_query = query for param, value in params.items(): # Update the parameter name to ensure uniqueness if param in param_counter: param_counter[param] += 1 else: param_counter[param] = 1 new_param_name = f"{param}_{param_counter[param]}" # Replace the parameter in the query fragment new_query = new_query.replace(f"${param}", f"${new_param_name}") # Add the parameter to the combined parameters dictionary combined_params[new_param_name] = value # Combine the query fragments with an AND operator if combined_query: combined_query += f" {operator} " combined_query += f"({new_query})" return combined_query, combined_params @deprecated( since="0.3.8", removal="1.0", alternative_import="langchain_neo4j.vectorstores.neo4j_vector.collect_params", ) def collect_params( input_data: List[Tuple[str, Dict[str, str]]], ) -> Tuple[List[str], Dict[str, Any]]: """Transform the input data into the desired format. Args: - input_data (list of tuples): Input data to transform. Each tuple contains a string and a dictionary. Returns: - tuple: A tuple containing a list of strings and a dictionary. """ # Initialize variables to hold the output parts query_parts = [] params = {} # Loop through each item in the input data for query_part, param in input_data: # Append the query part to the list query_parts.append(query_part) # Update the params dictionary with the param dictionary params.update(param) # Return the transformed data return (query_parts, params) @deprecated( since="0.3.8", removal="1.0", alternative_import="langchain_neo4j.vectorstores.neo4j_vector._handle_field_filter", ) def _handle_field_filter( field: str, value: Any, param_number: int = 1 ) -> Tuple[str, Dict]: """Create a filter for a specific field. 
Args:
        field: name of field
        value: value to filter
            If provided as is then this will be an equality filter
            If provided as a dictionary then this will be a filter,
            the key will be the operator and the value will be
            the value to filter by
        param_number: sequence number of parameters used to map between param
            dict and Cypher snippet

    Returns a tuple of
        - Cypher filter snippet
        - Dictionary with parameters used in filter snippet
    """
    if not isinstance(field, str):
        raise ValueError(
            f"field should be a string but got: {type(field)} with value: {field}"
        )

    if field.startswith("$"):
        raise ValueError(
            f"Invalid filter condition. Expected a field but got an operator: "
            f"{field}"
        )

    # Allow [a-zA-Z0-9_], disallow $ for now until we support escape characters
    if not field.isidentifier():
        raise ValueError(f"Invalid field name: {field}. Expected a valid identifier.")

    if isinstance(value, dict):
        # This is a filter specification
        if len(value) != 1:
            raise ValueError(
                "Invalid filter condition. Expected a value which "
                "is a dictionary with a single key that corresponds to an operator "
                f"but got a dictionary with {len(value)} keys. The first few "
                f"keys are: {list(value.keys())[:3]}"
            )
        operator, filter_value = list(value.items())[0]
        # Verify that the operator is supported
        if operator not in SUPPORTED_OPERATORS:
            raise ValueError(
                f"Invalid operator: {operator}. "
                f"Expected one of {SUPPORTED_OPERATORS}"
            )
    else:  # Then we assume an equality operator
        operator = "$eq"
        filter_value = value

    if operator in COMPARISONS_TO_NATIVE:
        # Map the operator onto its native Cypher comparison
        # native is trusted input
        native = COMPARISONS_TO_NATIVE[operator]
        query_snippet = f"n.`{field}` {native} $param_{param_number}"
        query_param = {f"param_{param_number}": filter_value}
        return (query_snippet, query_param)
    elif operator == "$between":
        low, high = filter_value
        query_snippet = (
            f"$param_{param_number}_low <= n.`{field}` <= $param_{param_number}_high"
        )
        query_param = {
            f"param_{param_number}_low": low,
            f"param_{param_number}_high": high,
        }
        return (query_snippet, query_param)
    elif operator in {"$in", "$nin", "$like", "$ilike"}:
        # Validate element types for the list-membership operators
        if operator in {"$in", "$nin"}:
            for val in filter_value:
                if not isinstance(val, (str, int, float)):
                    raise NotImplementedError(
                        f"Unsupported type: {type(val)} for value: {val}"
                    )
        if operator in {"$in"}:
            query_snippet = f"n.`{field}` IN $param_{param_number}"
            query_param = {f"param_{param_number}": filter_value}
            return (query_snippet, query_param)
        elif operator in {"$nin"}:
            query_snippet = f"n.`{field}` NOT IN $param_{param_number}"
            query_param = {f"param_{param_number}": filter_value}
            return (query_snippet, query_param)
        elif operator in {"$like"}:
            query_snippet = f"n.`{field}` CONTAINS $param_{param_number}"
            query_param = {f"param_{param_number}": filter_value.rstrip("%")}
            return (query_snippet, query_param)
        elif operator in {"$ilike"}:
            query_snippet = f"toLower(n.`{field}`) CONTAINS $param_{param_number}"
            query_param = {f"param_{param_number}": filter_value.rstrip("%")}
            return (query_snippet, query_param)
        else:
            raise NotImplementedError()
    else:
        raise NotImplementedError()


@deprecated(
    since="0.3.8",
    removal="1.0",
    alternative_import="langchain_neo4j.vectorstores.neo4j_vector.construct_metadata_filter",
)
def construct_metadata_filter(filter: Dict[str, Any]) -> Tuple[str, Dict]:
    """Construct a metadata filter.

    Args:
        filter: A dictionary representing the filter condition.
Returns: Tuple[str, Dict] """ if isinstance(filter, dict): if len(filter) == 1: # The only operators allowed at the top level are $AND and $OR # First check if an operator or a field key, value = list(filter.items())[0] if key.startswith("$"): # Then it's an operator if key.lower() not in ["$and", "$or"]: raise ValueError( f"Invalid filter condition. Expected $and or $or " f"but got: {key}" ) else: # Then it's a field return _handle_field_filter(key, filter[key]) # Here we handle the $and and $or operators if not isinstance(value, list): raise ValueError( f"Expected a list, but got {type(value)} for value: {value}" ) if key.lower() == "$and": and_ = combine_queries( [construct_metadata_filter(el) for el in value], "AND" ) if len(and_) >= 1: return and_ else: raise ValueError( "Invalid filter condition. Expected a dictionary " "but got an empty dictionary" ) elif key.lower() == "$or": or_ = combine_queries( [construct_metadata_filter(el) for el in value], "OR" ) if len(or_) >= 1: return or_ else: raise ValueError( "Invalid filter condition. Expected a dictionary " "but got an empty dictionary" ) else: raise ValueError( f"Invalid filter condition. Expected $and or $or " f"but got: {key}" ) elif len(filter) > 1: # Then all keys have to be fields (they cannot be operators) for key in filter.keys(): if key.startswith("$"): raise ValueError( f"Invalid filter condition. Expected a field but got: {key}" ) # These should all be fields and combined using an $and operator and_multiple = collect_params( [ _handle_field_filter(k, v, index) for index, (k, v) in enumerate(filter.items()) ] ) if len(and_multiple) >= 1: return " AND ".join(and_multiple[0]), and_multiple[1] else: raise ValueError( "Invalid filter condition. Expected a dictionary " "but got an empty dictionary" ) else: raise ValueError("Got an empty dictionary for filters.") @deprecated( since="0.3.8", removal="1.0", alternative_import="langchain_neo4j.Neo4jVector", ) class Neo4jVector(VectorStore): """`Neo4j` vector index. To use, you should have the ``neo4j`` python package installed. Args: url: Neo4j connection url username: Neo4j username. password: Neo4j password database: Optionally provide Neo4j database Defaults to "neo4j" embedding: Any embedding function implementing `langchain.embeddings.base.Embeddings` interface. distance_strategy: The distance strategy to use. (default: COSINE) search_type: The type of search to be performed, either 'vector' or 'hybrid' node_label: The label used for nodes in the Neo4j database. (default: "Chunk") embedding_node_property: The property name in Neo4j to store embeddings. (default: "embedding") text_node_property: The property name in Neo4j to store the text. (default: "text") retrieval_query: The Cypher query to be used for customizing retrieval. If empty, a default query will be used. index_type: The type of index to be used, either 'NODE' or 'RELATIONSHIP' pre_delete_collection: If True, will delete existing data if it exists. (default: False). Useful for testing. Example: .. 
code-block:: python

            from langchain_community.vectorstores.neo4j_vector import Neo4jVector
            from langchain_community.embeddings.openai import OpenAIEmbeddings

            url = "bolt://localhost:7687"
            username = "neo4j"
            password = "pleaseletmein"
            embeddings = OpenAIEmbeddings()
            vectorstore = Neo4jVector.from_documents(
                embedding=embeddings,
                documents=docs,
                url=url,
                username=username,
                password=password,
            )
    """

    def __init__(
        self,
        embedding: Embeddings,
        *,
        search_type: SearchType = SearchType.VECTOR,
        username: Optional[str] = None,
        password: Optional[str] = None,
        url: Optional[str] = None,
        keyword_index_name: Optional[str] = "keyword",
        database: Optional[str] = None,
        index_name: str = "vector",
        node_label: str = "Chunk",
        embedding_node_property: str = "embedding",
        text_node_property: str = "text",
        distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY,
        logger: Optional[logging.Logger] = None,
        pre_delete_collection: bool = False,
        retrieval_query: str = "",
        relevance_score_fn: Optional[Callable[[float], float]] = None,
        index_type: IndexType = DEFAULT_INDEX_TYPE,
        graph: Optional[Neo4jGraph] = None,
    ) -> None:
        try:
            import neo4j
        except ImportError:
            raise ImportError(
                "Could not import neo4j python package. "
                "Please install it with `pip install neo4j`."
            )

        # Allow only cosine and euclidean distance strategies
        if distance_strategy not in [
            DistanceStrategy.EUCLIDEAN_DISTANCE,
            DistanceStrategy.COSINE,
        ]:
            raise ValueError(
                "distance_strategy must be either 'EUCLIDEAN_DISTANCE' or 'COSINE'"
            )

        # Graph object takes precedence over env or input params
        if graph:
            self._driver = graph._driver
            self._database = graph._database
        else:
            # Handle if the credentials are environment variables
            # Support URL for backwards compatibility
            if not url:
                url = os.environ.get("NEO4J_URL")

            url = get_from_dict_or_env({"url": url}, "url", "NEO4J_URI")
            username = get_from_dict_or_env(
                {"username": username}, "username", "NEO4J_USERNAME"
            )
            password = get_from_dict_or_env(
                {"password": password}, "password", "NEO4J_PASSWORD"
            )
            database = get_from_dict_or_env(
                {"database": database}, "database", "NEO4J_DATABASE", "neo4j"
            )

            self._driver = neo4j.GraphDatabase.driver(url, auth=(username, password))
            self._database = database
            # Verify connection
            try:
                self._driver.verify_connectivity()
            except neo4j.exceptions.ServiceUnavailable:
                raise ValueError(
                    "Could not connect to Neo4j database. "
                    "Please ensure that the url is correct"
                )
            except neo4j.exceptions.AuthError:
                raise ValueError(
                    "Could not connect to Neo4j database. 
" "Please ensure that the username and password are correct" ) self.schema = "" # Verify if the version support vector index self._is_enterprise = False self.verify_version() # Verify that required values are not null check_if_not_null( [ "index_name", "node_label", "embedding_node_property", "text_node_property", ], [index_name, node_label, embedding_node_property, text_node_property], ) self.embedding = embedding self._distance_strategy = distance_strategy self.index_name = index_name self.keyword_index_name = keyword_index_name self.node_label = node_label self.embedding_node_property = embedding_node_property self.text_node_property = text_node_property self.logger = logger or logging.getLogger(__name__) self.override_relevance_score_fn = relevance_score_fn self.retrieval_query = retrieval_query self.search_type = search_type self._index_type = index_type # Calculate embedding dimension self.embedding_dimension = len(embedding.embed_query("foo")) # Delete existing data if flagged if pre_delete_collection: from neo4j.exceptions import DatabaseError self.query( f"MATCH (n:`{self.node_label}`) " "CALL (n) { DETACH DELETE n } " "IN TRANSACTIONS OF 10000 ROWS;" ) # Delete index try: self.query(f"DROP INDEX {self.index_name}") except DatabaseError: # Index didn't exist yet pass def query( self, query: str, *, params: Optional[dict] = None, ) -> List[Dict[str, Any]]: """Query Neo4j database with retries and exponential backoff. Args: query (str): The Cypher query to execute. params (dict, optional): Dictionary of query parameters. Defaults to {}. Returns: List[Dict[str, Any]]: List of dictionaries containing the query results. """ from neo4j import Query from neo4j.exceptions import Neo4jError params = params or {} try: data, _, _ = self._driver.execute_query( query, database_=self._database, parameters_=params ) return [r.data() for r in data] except Neo4jError as e: if not ( ( ( # isCallInTransactionError e.code == "Neo.DatabaseError.Statement.ExecutionFailed" or e.code == "Neo.DatabaseError.Transaction.TransactionStartFailed" ) and "in an implicit transaction" in e.message # type: ignore[operator] ) or ( # isPeriodicCommitError e.code == "Neo.ClientError.Statement.SemanticError" and ( "in an open transaction is not possible" in e.message # type: ignore[operator] or "tried to execute in an explicit transaction" in e.message # type: ignore[operator] ) ) ): raise # Fallback to allow implicit transactions with self._driver.session(database=self._database) as session: data = session.run(Query(text=query), params) # type: ignore[assignment] return [r.data() for r in data] def verify_version(self) -> None: """ Check if the connected Neo4j database version supports vector indexing. Queries the Neo4j database to retrieve its version and compares it against a target version (5.11.0) that is known to support vector indexing. Raises a ValueError if the connected Neo4j version is not supported. 
""" db_data = self.query("CALL dbms.components()") version = db_data[0]["versions"][0] if "aura" in version: version_tuple = tuple(map(int, version.split("-")[0].split("."))) + (0,) else: version_tuple = tuple(map(int, version.split("."))) target_version = (5, 11, 0) if version_tuple < target_version: raise ValueError( "Version index is only supported in Neo4j version 5.11 or greater" ) # Flag for metadata filtering metadata_target_version = (5, 18, 0) if version_tuple < metadata_target_version: self.support_metadata_filter = False else: self.support_metadata_filter = True # Flag for enterprise self._is_enterprise = True if db_data[0]["edition"] == "enterprise" else False def retrieve_existing_index(self) -> Tuple[Optional[int], Optional[str]]: """ Check if the vector index exists in the Neo4j database and returns its embedding dimension. This method queries the Neo4j database for existing indexes and attempts to retrieve the dimension of the vector index with the specified name. If the index exists, its dimension is returned. If the index doesn't exist, `None` is returned. Returns: int or None: The embedding dimension of the existing index if found. """ index_information = self.query( "SHOW INDEXES YIELD name, type, entityType, labelsOrTypes, " "properties, options WHERE type = 'VECTOR' AND (name = $index_name " "OR (labelsOrTypes[0] = $node_label AND " "properties[0] = $embedding_node_property)) " "RETURN name, entityType, labelsOrTypes, properties, options ", params={ "index_name": self.index_name, "node_label": self.node_label, "embedding_node_property": self.embedding_node_property, }, ) # sort by index_name index_information = sort_by_index_name(index_information, self.index_name) try: self.index_name = index_information[0]["name"] self.node_label = index_information[0]["labelsOrTypes"][0] self.embedding_node_property = index_information[0]["properties"][0] self._index_type = index_information[0]["entityType"] embedding_dimension = None index_config = index_information[0]["options"]["indexConfig"] if "vector.dimensions" in index_config: embedding_dimension = index_config["vector.dimensions"] return embedding_dimension, index_information[0]["entityType"] except IndexError: return None, None def retrieve_existing_fts_index( self, text_node_properties: List[str] = [] ) -> Optional[str]: """ Check if the fulltext index exists in the Neo4j database This method queries the Neo4j database for existing fts indexes with the specified name. Returns: (Tuple): keyword index information """ index_information = self.query( "SHOW INDEXES YIELD name, type, labelsOrTypes, properties, options " "WHERE type = 'FULLTEXT' AND (name = $keyword_index_name " "OR (labelsOrTypes = [$node_label] AND " "properties = $text_node_property)) " "RETURN name, labelsOrTypes, properties, options ", params={ "keyword_index_name": self.keyword_index_name, "node_label": self.node_label, "text_node_property": text_node_properties or [self.text_node_property], }, ) # sort by index_name index_information = sort_by_index_name(index_information, self.index_name) try: self.keyword_index_name = index_information[0]["name"] self.text_node_property = index_information[0]["properties"][0] node_label = index_information[0]["labelsOrTypes"][0] return node_label except IndexError: return None def create_new_index(self) -> None: """ This method constructs a Cypher query and executes it to create a new vector index in Neo4j. 
""" index_query = ( f"CREATE VECTOR INDEX {self.index_name} IF NOT EXISTS " f"FOR (m:`{self.node_label}`) ON m.`{self.embedding_node_property}` " "OPTIONS { indexConfig: { " "`vector.dimensions`: toInteger($embedding_dimension), " "`vector.similarity_function`: $similarity_metric }}" ) parameters = { "embedding_dimension": self.embedding_dimension, "similarity_metric": DISTANCE_MAPPING[self._distance_strategy], } self.query(index_query, params=parameters) def create_new_keyword_index(self, text_node_properties: List[str] = []) -> None: """ This method constructs a Cypher query and executes it to create a new full text index in Neo4j. """ node_props = text_node_properties or [self.text_node_property] fts_index_query = ( f"CREATE FULLTEXT INDEX {self.keyword_index_name} " f"FOR (n:`{self.node_label}`) ON EACH " f"[{', '.join(['n.`' + el + '`' for el in node_props])}]" ) self.query(fts_index_query) @property def embeddings(self) -> Embeddings: return self.embedding @classmethod def __from( cls, texts: List[str], embeddings: List[List[float]], embedding: Embeddings, metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, create_id_index: bool = True, search_type: SearchType = SearchType.VECTOR, **kwargs: Any, ) -> Neo4jVector: if ids is None: ids = [md5(text.encode("utf-8")).hexdigest() for text in texts] if not metadatas: metadatas = [{} for _ in texts] store = cls( embedding=embedding, search_type=search_type, **kwargs, ) # Check if the vector index already exists embedding_dimension, index_type = store.retrieve_existing_index() # Raise error if relationship index type if index_type == "RELATIONSHIP": raise ValueError( "Data ingestion is not supported with relationship vector index." ) # If the vector index doesn't exist yet if not index_type: store.create_new_index() # If the index already exists, check if embedding dimensions match elif ( embedding_dimension and not store.embedding_dimension == embedding_dimension ): raise ValueError( f"Index with name {store.index_name} already exists." "The provided embedding function and vector index " "dimensions do not match.\n" f"Embedding function dimension: {store.embedding_dimension}\n" f"Vector index dimension: {embedding_dimension}" ) if search_type == SearchType.HYBRID: fts_node_label = store.retrieve_existing_fts_index() # If the FTS index doesn't exist yet if not fts_node_label: store.create_new_keyword_index() else: # Validate that FTS and Vector index use the same information if not fts_node_label == store.node_label: raise ValueError( "Vector and keyword index don't index the same node label" ) # Create unique constraint for faster import if create_id_index: store.query( "CREATE CONSTRAINT IF NOT EXISTS " f"FOR (n:`{store.node_label}`) REQUIRE n.id IS UNIQUE;" ) store.add_embeddings( texts=texts, embeddings=embeddings, metadatas=metadatas, ids=ids, **kwargs ) return store def add_embeddings( self, texts: Iterable[str], embeddings: List[List[float]], metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, **kwargs: Any, ) -> List[str]: """Add embeddings to the vectorstore. Args: texts: Iterable of strings to add to the vectorstore. embeddings: List of list of embedding vectors. metadatas: List of metadatas associated with the texts. 
kwargs: vectorstore specific parameters """ if ids is None: ids = [md5(text.encode("utf-8")).hexdigest() for text in texts] if not metadatas: metadatas = [{} for _ in texts] import_query = ( "UNWIND $data AS row " "CALL (row) { WITH row " f"MERGE (c:`{self.node_label}` {{id: row.id}}) " "WITH c, row " f"CALL db.create.setNodeVectorProperty(c, " f"'{self.embedding_node_property}', row.embedding) " f"SET c.`{self.text_node_property}` = row.text " "SET c += row.metadata " "} IN TRANSACTIONS OF 1000 ROWS " ) parameters = { "data": [ {"text": text, "metadata": metadata, "embedding": embedding, "id": id} for text, metadata, embedding, id in zip( texts, metadatas, embeddings, ids ) ] } self.query(import_query, params=parameters) return ids def add_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, **kwargs: Any, ) -> List[str]: """Run more texts through the embeddings and add to the vectorstore. Args: texts: Iterable of strings to add to the vectorstore. metadatas: Optional list of metadatas associated with the texts. kwargs: vectorstore specific parameters Returns: List of ids from adding the texts into the vectorstore. """ embeddings = self.embedding.embed_documents(list(texts)) return self.add_embeddings( texts=texts, embeddings=embeddings, metadatas=metadatas, ids=ids, **kwargs ) def similarity_search( self, query: str, k: int = 4, params: Dict[str, Any] = {}, filter: Optional[Dict[str, Any]] = None, **kwargs: Any, ) -> List[Document]: """Run similarity search with Neo4jVector. Args: query (str): Query text to search for. k (int): Number of results to return. Defaults to 4. params (Dict[str, Any]): The search params for the index type. Defaults to empty dict. filter (Optional[Dict[str, Any]]): Dictionary of argument(s) to filter on metadata. Defaults to None. Returns: List of Documents most similar to the query. """ embedding = self.embedding.embed_query(text=query) return self.similarity_search_by_vector( embedding=embedding, k=k, query=query, params=params, filter=filter, **kwargs, ) def similarity_search_with_score( self, query: str, k: int = 4, params: Dict[str, Any] = {}, filter: Optional[Dict[str, Any]] = None, **kwargs: Any, ) -> List[Tuple[Document, float]]: """Return docs most similar to query. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. params (Dict[str, Any]): The search params for the index type. Defaults to empty dict. filter (Optional[Dict[str, Any]]): Dictionary of argument(s) to filter on metadata. Defaults to None. Returns: List of Documents most similar to the query and score for each """ embedding = self.embedding.embed_query(query) docs = self.similarity_search_with_score_by_vector( embedding=embedding, k=k, query=query, params=params, filter=filter, **kwargs, ) return docs def similarity_search_with_score_by_vector( self, embedding: List[float], k: int = 4, filter: Optional[Dict[str, Any]] = None, params: Dict[str, Any] = {}, **kwargs: Any, ) -> List[Tuple[Document, float]]: """ Perform a similarity search in the Neo4j database using a given vector and return the top k similar documents with their scores. This method uses a Cypher query to find the top k documents that are most similar to a given embedding. The similarity is measured using a vector index in the Neo4j database. The results are returned as a list of tuples, each containing a Document object and its similarity score. Args: embedding (List[float]): The embedding vector to compare against. 
k (int, optional): The number of top similar documents to retrieve.
            filter (Optional[Dict[str, Any]]): Dictionary of argument(s) to
                filter on metadata. Defaults to None.
            params (Dict[str, Any]): The search params for the index type.
                Defaults to empty dict.

        Returns:
            List[Tuple[Document, float]]: A list of tuples, each containing
                a Document object and its similarity score.
        """
        if filter:
            # Verify that 5.18 or later is used
            if not self.support_metadata_filter:
                raise ValueError(
                    "Metadata filtering is only supported in "
                    "Neo4j version 5.18 or greater"
                )
            # Metadata filtering cannot be combined with hybrid search
            if self.search_type == SearchType.HYBRID:
                raise ValueError(
                    "Metadata filtering can't be used in combination with "
                    "a hybrid search approach"
                )
            parallel_query = (
                "CYPHER runtime = parallel parallelRuntimeSupport=all "
                if self._is_enterprise
                else ""
            )
            base_index_query = parallel_query + (
                f"MATCH (n:`{self.node_label}`) WHERE "
                f"n.`{self.embedding_node_property}` IS NOT NULL AND "
                f"size(n.`{self.embedding_node_property}`) = "
                f"toInteger({self.embedding_dimension}) AND "
            )
            base_cosine_query = (
                " WITH n as node, vector.similarity.cosine("
                f"n.`{self.embedding_node_property}`, "
                "$embedding) AS score ORDER BY score DESC LIMIT toInteger($k) "
            )
            filter_snippets, filter_params = construct_metadata_filter(filter)
            index_query = base_index_query + filter_snippets + base_cosine_query

        else:
            index_query = _get_search_index_query(self.search_type, self._index_type)
            filter_params = {}

        if self._index_type == IndexType.RELATIONSHIP:
            if kwargs.get("return_embeddings"):
                default_retrieval = (
                    f"RETURN relationship.`{self.text_node_property}` AS text, score, "
                    f"relationship {{.*, `{self.text_node_property}`: Null, "
                    f"`{self.embedding_node_property}`: Null, id: Null, "
                    f"_embedding_: relationship.`{self.embedding_node_property}`}} "
                    "AS metadata"
                )
            else:
                default_retrieval = (
                    f"RETURN relationship.`{self.text_node_property}` AS text, score, "
                    f"relationship {{.*, `{self.text_node_property}`: Null, "
                    f"`{self.embedding_node_property}`: Null, id: Null }} AS metadata"
                )
        else:
            if kwargs.get("return_embeddings"):
                default_retrieval = (
                    f"RETURN node.`{self.text_node_property}` AS text, score, "
                    f"node {{.*, `{self.text_node_property}`: Null, "
                    f"`{self.embedding_node_property}`: Null, id: Null, "
                    f"_embedding_: node.`{self.embedding_node_property}`}} AS metadata"
                )
            else:
                default_retrieval = (
                    f"RETURN node.`{self.text_node_property}` AS text, score, "
                    f"node {{.*, `{self.text_node_property}`: Null, "
                    f"`{self.embedding_node_property}`: Null, id: Null }} AS metadata"
                )

        retrieval_query = (
            self.retrieval_query if self.retrieval_query else default_retrieval
        )

        read_query = index_query + retrieval_query
        parameters = {
            "index": self.index_name,
            "k": k,
            "embedding": embedding,
            "keyword_index": self.keyword_index_name,
            "query": remove_lucene_chars(kwargs["query"]),
            **params,
            **filter_params,
        }

        results = self.query(read_query, params=parameters)

        if any(result["text"] is None for result in results):
            if not self.retrieval_query:
                raise ValueError(
                    f"Make sure that none of the `{self.text_node_property}` "
                    f"properties on nodes with label `{self.node_label}` "
                    "are missing or empty"
                )
            else:
                raise ValueError(
                    "Inspect the `retrieval_query` and ensure it doesn't "
                    "return None for the `text` column"
                )
        if kwargs.get("return_embeddings") and any(
            result["metadata"]["_embedding_"] is None for result in results
        ):
            if not self.retrieval_query:
                raise ValueError(
                    f"Make sure that none of the `{self.embedding_node_property}` "
                    f"properties on 
nodes with label `{self.node_label}` " "are missing or empty" ) else: raise ValueError( "Inspect the `retrieval_query` and ensure it doesn't " "return None for the `_embedding_` metadata column" ) docs = [ ( Document( page_content=dict_to_yaml_str(result["text"]) if isinstance(result["text"], dict) else result["text"], metadata={ k: v for k, v in result["metadata"].items() if v is not None }, ), result["score"], ) for result in results ] return docs def similarity_search_by_vector( self, embedding: List[float], k: int = 4, filter: Optional[Dict[str, Any]] = None, params: Dict[str, Any] = {}, **kwargs: Any, ) -> List[Document]: """Return docs most similar to embedding vector. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. filter (Optional[Dict[str, Any]]): Dictionary of argument(s) to filter on metadata. Defaults to None. params (Dict[str, Any]): The search params for the index type. Defaults to empty dict. Returns: List of Documents most similar to the query vector. """ docs_and_scores = self.similarity_search_with_score_by_vector( embedding=embedding, k=k, filter=filter, params=params, **kwargs ) return [doc for doc, _ in docs_and_scores] @classmethod def from_texts( cls: Type[Neo4jVector], texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY, ids: Optional[List[str]] = None, **kwargs: Any, ) -> Neo4jVector: """ Return Neo4jVector initialized from texts and embeddings. Neo4j credentials are required in the form of `url`, `username`, and `password` and optional `database` parameters. """ embeddings = embedding.embed_documents(list(texts)) return cls.__from( texts, embeddings, embedding, metadatas=metadatas, ids=ids, distance_strategy=distance_strategy, **kwargs, ) @classmethod def from_embeddings( cls, text_embeddings: List[Tuple[str, List[float]]], embedding: Embeddings, metadatas: Optional[List[dict]] = None, distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY, ids: Optional[List[str]] = None, pre_delete_collection: bool = False, **kwargs: Any, ) -> Neo4jVector: """Construct Neo4jVector wrapper from raw documents and pre- generated embeddings. Return Neo4jVector initialized from documents and embeddings. Neo4j credentials are required in the form of `url`, `username`, and `password` and optional `database` parameters. Example: .. code-block:: python from langchain_community.vectorstores.neo4j_vector import Neo4jVector from langchain_community.embeddings import OpenAIEmbeddings embeddings = OpenAIEmbeddings() text_embeddings = embeddings.embed_documents(texts) text_embedding_pairs = list(zip(texts, text_embeddings)) vectorstore = Neo4jVector.from_embeddings( text_embedding_pairs, embeddings) """ texts = [t[0] for t in text_embeddings] embeddings = [t[1] for t in text_embeddings] return cls.__from( texts, embeddings, embedding, metadatas=metadatas, ids=ids, distance_strategy=distance_strategy, pre_delete_collection=pre_delete_collection, **kwargs, ) @classmethod def from_existing_index( cls: Type[Neo4jVector], embedding: Embeddings, index_name: str, search_type: SearchType = DEFAULT_SEARCH_TYPE, keyword_index_name: Optional[str] = None, **kwargs: Any, ) -> Neo4jVector: """ Get instance of an existing Neo4j vector index. This method will return the instance of the store without inserting any new embeddings. 
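
        Example (connection values below are placeholders, mirroring the
        class-level example):

            .. code-block:: python

                store = Neo4jVector.from_existing_index(
                    embedding=OpenAIEmbeddings(),
                    index_name="vector",
                    url="bolt://localhost:7687",
                    username="neo4j",
                    password="pleaseletmein",
                )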
Neo4j credentials are required in the form of `url`, `username`, and `password` and optional `database` parameters along with the `index_name` definition. """ if search_type == SearchType.HYBRID and not keyword_index_name: raise ValueError( "keyword_index name has to be specified " "when using hybrid search option" ) store = cls( embedding=embedding, index_name=index_name, keyword_index_name=keyword_index_name, search_type=search_type, **kwargs, ) embedding_dimension, index_type = store.retrieve_existing_index() # Raise error if relationship index type if index_type == "RELATIONSHIP": raise ValueError( "Relationship vector index is not supported with " "`from_existing_index` method. Please use the " "`from_existing_relationship_index` method." ) if not index_type: raise ValueError( "The specified vector index name does not exist. " "Make sure to check if you spelled it correctly" ) # Check if embedding function and vector index dimensions match if embedding_dimension and not store.embedding_dimension == embedding_dimension: raise ValueError( "The provided embedding function and vector index " "dimensions do not match.\n" f"Embedding function dimension: {store.embedding_dimension}\n" f"Vector index dimension: {embedding_dimension}" ) if search_type == SearchType.HYBRID: fts_node_label = store.retrieve_existing_fts_index() # If the FTS index doesn't exist yet if not fts_node_label: raise ValueError( "The specified keyword index name does not exist. " "Make sure to check if you spelled it correctly" ) else: # Validate that FTS and Vector index use the same information if not fts_node_label == store.node_label: raise ValueError( "Vector and keyword index don't index the same node label" ) return store @classmethod def from_existing_relationship_index( cls: Type[Neo4jVector], embedding: Embeddings, index_name: str, search_type: SearchType = DEFAULT_SEARCH_TYPE, **kwargs: Any, ) -> Neo4jVector: """ Get instance of an existing Neo4j relationship vector index. This method will return the instance of the store without inserting any new embeddings. Neo4j credentials are required in the form of `url`, `username`, and `password` and optional `database` parameters along with the `index_name` definition. """ if search_type == SearchType.HYBRID: raise ValueError( "Hybrid search is not supported in combination " "with relationship vector index" ) store = cls( embedding=embedding, index_name=index_name, **kwargs, ) embedding_dimension, index_type = store.retrieve_existing_index() if not index_type: raise ValueError( "The specified vector index name does not exist. " "Make sure to check if you spelled it correctly" ) # Raise error if relationship index type if index_type == "NODE": raise ValueError( "Node vector index is not supported with " "`from_existing_relationship_index` method. Please use the " "`from_existing_index` method." ) # Check if embedding function and vector index dimensions match if embedding_dimension and not store.embedding_dimension == embedding_dimension: raise ValueError( "The provided embedding function and vector index " "dimensions do not match.\n" f"Embedding function dimension: {store.embedding_dimension}\n" f"Vector index dimension: {embedding_dimension}" ) return store @classmethod def from_documents( cls: Type[Neo4jVector], documents: List[Document], embedding: Embeddings, distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY, ids: Optional[List[str]] = None, **kwargs: Any, ) -> Neo4jVector: """ Return Neo4jVector initialized from documents and embeddings. 
Neo4j credentials are required in the form of `url`, `username`, and `password` and optional `database` parameters. """ texts = [d.page_content for d in documents] metadatas = [d.metadata for d in documents] return cls.from_texts( texts=texts, embedding=embedding, distance_strategy=distance_strategy, metadatas=metadatas, ids=ids, **kwargs, ) @classmethod def from_existing_graph( cls: Type[Neo4jVector], embedding: Embeddings, node_label: str, embedding_node_property: str, text_node_properties: List[str], *, keyword_index_name: Optional[str] = "keyword", index_name: str = "vector", search_type: SearchType = DEFAULT_SEARCH_TYPE, retrieval_query: str = "", **kwargs: Any, ) -> Neo4jVector: """ Initialize and return a Neo4jVector instance from an existing graph. This method initializes a Neo4jVector instance using the provided parameters and the existing graph. It validates the existence of the indices and creates new ones if they don't exist. Returns: Neo4jVector: An instance of Neo4jVector initialized with the provided parameters and existing graph. Example: >>> neo4j_vector = Neo4jVector.from_existing_graph( ... embedding=my_embedding, ... node_label="Document", ... embedding_node_property="embedding", ... text_node_properties=["title", "content"] ... ) Note: Neo4j credentials are required in the form of `url`, `username`, and `password`, and optional `database` parameters passed as additional keyword arguments. """ # Validate the list is not empty if not text_node_properties: raise ValueError( "Parameter `text_node_properties` must not be an empty list" ) # Prefer retrieval query from params, otherwise construct it if not retrieval_query: retrieval_query = ( f"RETURN reduce(str='', k IN {text_node_properties} |" " str + '\\n' + k + ': ' + coalesce(node[k], '')) AS text, " "node {.*, `" + embedding_node_property + "`: Null, id: Null, " + ", ".join([f"`{prop}`: Null" for prop in text_node_properties]) + "} AS metadata, score" ) store = cls( embedding=embedding, index_name=index_name, keyword_index_name=keyword_index_name, search_type=search_type, retrieval_query=retrieval_query, node_label=node_label, embedding_node_property=embedding_node_property, **kwargs, ) # Check if the vector index already exists embedding_dimension, index_type = store.retrieve_existing_index() # Raise error if relationship index type if index_type == "RELATIONSHIP": raise ValueError( "`from_existing_graph` method does not support " " existing relationship vector index. " "Please use `from_existing_relationship_index` method" ) # If the vector index doesn't exist yet if not index_type: store.create_new_index() # If the index already exists, check if embedding dimensions match elif ( embedding_dimension and not store.embedding_dimension == embedding_dimension ): raise ValueError( f"Index with name {store.index_name} already exists." 
"The provided embedding function and vector index " "dimensions do not match.\n" f"Embedding function dimension: {store.embedding_dimension}\n" f"Vector index dimension: {embedding_dimension}" ) # FTS index for Hybrid search if search_type == SearchType.HYBRID: fts_node_label = store.retrieve_existing_fts_index(text_node_properties) # If the FTS index doesn't exist yet if not fts_node_label: store.create_new_keyword_index(text_node_properties) else: # Validate that FTS and Vector index use the same information if not fts_node_label == store.node_label: raise ValueError( "Vector and keyword index don't index the same node label" ) # Populate embeddings while True: fetch_query = ( f"MATCH (n:`{node_label}`) " f"WHERE n.{embedding_node_property} IS null " "AND any(k in $props WHERE n[k] IS NOT null) " f"RETURN elementId(n) AS id, reduce(str=''," "k IN $props | str + '\\n' + k + ':' + coalesce(n[k], '')) AS text " "LIMIT 1000" ) data = store.query(fetch_query, params={"props": text_node_properties}) if not data: break text_embeddings = embedding.embed_documents([el["text"] for el in data]) params = { "data": [ {"id": el["id"], "embedding": embedding} for el, embedding in zip(data, text_embeddings) ] } store.query( "UNWIND $data AS row " f"MATCH (n:`{node_label}`) " "WHERE elementId(n) = row.id " f"CALL db.create.setNodeVectorProperty(n, " f"'{embedding_node_property}', row.embedding) " "RETURN count(*)", params=params, ) # If embedding calculation should be stopped if len(data) < 1000: break return store def max_marginal_relevance_search( self, query: str, k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, filter: Optional[dict] = None, **kwargs: Any, ) -> List[Document]: """Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: query: search query text. k: Number of Documents to return. Defaults to 4. fetch_k: Number of Documents to fetch to pass to MMR algorithm. lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. filter: Filter on metadata properties, e.g. { "str_property": "foo", "int_property": 123 } Returns: List of Documents selected by maximal marginal relevance. """ # Embed the query query_embedding = self.embedding.embed_query(query) # Fetch the initial documents got_docs = self.similarity_search_with_score_by_vector( embedding=query_embedding, query=query, k=fetch_k, return_embeddings=True, filter=filter, **kwargs, ) # Get the embeddings for the fetched documents got_embeddings = [doc.metadata["_embedding_"] for doc, _ in got_docs] # Select documents using maximal marginal relevance selected_indices = maximal_marginal_relevance( np.array(query_embedding), got_embeddings, lambda_mult=lambda_mult, k=k ) selected_docs = [got_docs[i][0] for i in selected_indices] # Remove embedding values from metadata for doc in selected_docs: del doc.metadata["_embedding_"] return selected_docs def _select_relevance_score_fn(self) -> Callable[[float], float]: """ The 'correct' relevance function may differ depending on a few things, including: - the distance / similarity metric used by the VectorStore - the scale of your embeddings (OpenAI's are unit normed. Many others are not!) - embedding dimensionality - etc. 
""" if self.override_relevance_score_fn is not None: return self.override_relevance_score_fn # Default strategy is to rely on distance strategy provided # in vectorstore constructor if self._distance_strategy == DistanceStrategy.COSINE: return lambda x: x elif self._distance_strategy == DistanceStrategy.EUCLIDEAN_DISTANCE: return lambda x: x else: raise ValueError( "No supported normalization function" f" for distance_strategy of {self._distance_strategy}." "Consider providing relevance_score_fn to PGVector constructor." )
0
lc_public_repos/langchain/libs/community/langchain_community
lc_public_repos/langchain/libs/community/langchain_community/vectorstores/rocksetdb.py
from __future__ import annotations import logging from copy import deepcopy from enum import Enum from typing import Any, Iterable, List, Optional, Tuple import numpy as np from langchain_core.documents import Document from langchain_core.embeddings import Embeddings from langchain_core.runnables import run_in_executor from langchain_core.vectorstores import VectorStore from langchain_community.vectorstores.utils import maximal_marginal_relevance logger = logging.getLogger(__name__) class Rockset(VectorStore): """`Rockset` vector store. To use, you should have the `rockset` python package installed. Note that to use this, the collection being used must already exist in your Rockset instance. You must also ensure you use a Rockset ingest transformation to apply `VECTOR_ENFORCE` on the column being used to store `embedding_key` in the collection. See: https://rockset.com/blog/introducing-vector-search-on-rockset/ for more details Everything below assumes `commons` Rockset workspace. Example: .. code-block:: python from langchain_community.vectorstores import Rockset from langchain_community.embeddings.openai import OpenAIEmbeddings import rockset # Make sure you use the right host (region) for your Rockset instance # and APIKEY has both read-write access to your collection. rs = rockset.RocksetClient(host=rockset.Regions.use1a1, api_key="***") collection_name = "langchain_demo" embeddings = OpenAIEmbeddings() vectorstore = Rockset(rs, collection_name, embeddings, "description", "description_embedding") """ def __init__( self, client: Any, embeddings: Embeddings, collection_name: str, text_key: str, embedding_key: str, workspace: str = "commons", ): """Initialize with Rockset client. Args: client: Rockset client object collection: Rockset collection to insert docs / query embeddings: Langchain Embeddings object to use to generate embedding for given text. text_key: column in Rockset collection to use to store the text embedding_key: column in Rockset collection to use to store the embedding. Note: We must apply `VECTOR_ENFORCE()` on this column via Rockset ingest transformation. """ try: from rockset import RocksetClient except ImportError: raise ImportError( "Could not import rockset client python package. " "Please install it with `pip install rockset`." ) if not isinstance(client, RocksetClient): raise ValueError( f"client should be an instance of rockset.RocksetClient, " f"got {type(client)}" ) # TODO: check that `collection_name` exists in rockset. Create if not. self._client = client self._collection_name = collection_name self._embeddings = embeddings self._text_key = text_key self._embedding_key = embedding_key self._workspace = workspace try: self._client.set_application("langchain") except AttributeError: # ignore pass @property def embeddings(self) -> Embeddings: return self._embeddings def add_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, batch_size: int = 32, **kwargs: Any, ) -> List[str]: """Run more texts through the embeddings and add to the vectorstore Args: texts: Iterable of strings to add to the vectorstore. metadatas: Optional list of metadatas associated with the texts. ids: Optional list of ids to associate with the texts. batch_size: Send documents in batches to rockset. Returns: List of ids from adding the texts into the vectorstore. 
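
        Example (assuming `vectorstore` was constructed as in the class
        docstring):
            .. code-block:: python

                ids = vectorstore.add_texts(
                    ["Gaming console", "Handheld console"],
                    metadatas=[{"price": 299.0}, {"price": 199.0}],
                    batch_size=32,
                )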
""" batch: list[dict] = [] stored_ids = [] for i, text in enumerate(texts): if len(batch) == batch_size: stored_ids += self._write_documents_to_rockset(batch) batch = [] doc = {} if metadatas and len(metadatas) > i: doc = deepcopy(metadatas[i]) if ids and len(ids) > i: doc["_id"] = ids[i] doc[self._text_key] = text doc[self._embedding_key] = self._embeddings.embed_query(text) batch.append(doc) if len(batch) > 0: stored_ids += self._write_documents_to_rockset(batch) batch = [] return stored_ids @classmethod def from_texts( cls, texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, client: Any = None, collection_name: str = "", text_key: str = "", embedding_key: str = "", ids: Optional[List[str]] = None, batch_size: int = 32, **kwargs: Any, ) -> Rockset: """Create Rockset wrapper with existing texts. This is intended as a quicker way to get started. """ # Sanitize inputs assert client is not None, "Rockset Client cannot be None" assert collection_name, "Collection name cannot be empty" assert text_key, "Text key name cannot be empty" assert embedding_key, "Embedding key cannot be empty" rockset = cls(client, embedding, collection_name, text_key, embedding_key) rockset.add_texts(texts, metadatas, ids, batch_size) return rockset # Rockset supports these vector distance functions. class DistanceFunction(Enum): COSINE_SIM = "COSINE_SIM" EUCLIDEAN_DIST = "EUCLIDEAN_DIST" DOT_PRODUCT = "DOT_PRODUCT" # how to sort results for "similarity" def order_by(self) -> str: if self.value == "EUCLIDEAN_DIST": return "ASC" return "DESC" def similarity_search_with_relevance_scores( self, query: str, k: int = 4, distance_func: DistanceFunction = DistanceFunction.COSINE_SIM, where_str: Optional[str] = None, **kwargs: Any, ) -> List[Tuple[Document, float]]: """Perform a similarity search with Rockset Args: query (str): Text to look up documents similar to. distance_func (DistanceFunction): how to compute distance between two vectors in Rockset. k (int, optional): Top K neighbors to retrieve. Defaults to 4. where_str (Optional[str], optional): Metadata filters supplied as a SQL `where` condition string. Defaults to None. eg. "price<=70.0 AND brand='Nintendo'" NOTE: Please do not let end-user to fill this and always be aware of SQL injection. Returns: List[Tuple[Document, float]]: List of documents with their relevance score """ return self.similarity_search_by_vector_with_relevance_scores( self._embeddings.embed_query(query), k, distance_func, where_str, **kwargs, ) def similarity_search( self, query: str, k: int = 4, distance_func: DistanceFunction = DistanceFunction.COSINE_SIM, where_str: Optional[str] = None, **kwargs: Any, ) -> List[Document]: """Same as `similarity_search_with_relevance_scores` but doesn't return the scores. 
""" return self.similarity_search_by_vector( self._embeddings.embed_query(query), k, distance_func, where_str, **kwargs, ) def similarity_search_by_vector( self, embedding: List[float], k: int = 4, distance_func: DistanceFunction = DistanceFunction.COSINE_SIM, where_str: Optional[str] = None, **kwargs: Any, ) -> List[Document]: """Accepts a query_embedding (vector), and returns documents with similar embeddings.""" docs_and_scores = self.similarity_search_by_vector_with_relevance_scores( embedding, k, distance_func, where_str, **kwargs ) return [doc for doc, _ in docs_and_scores] def similarity_search_by_vector_with_relevance_scores( self, embedding: List[float], k: int = 4, distance_func: DistanceFunction = DistanceFunction.COSINE_SIM, where_str: Optional[str] = None, **kwargs: Any, ) -> List[Tuple[Document, float]]: """Accepts a query_embedding (vector), and returns documents with similar embeddings along with their relevance scores.""" exclude_embeddings = True if "exclude_embeddings" in kwargs: exclude_embeddings = kwargs["exclude_embeddings"] q_str = self._build_query_sql( embedding, distance_func, k, where_str, exclude_embeddings ) try: query_response = self._client.Queries.query(sql={"query": q_str}) except Exception as e: logger.error("Exception when querying Rockset: %s\n", e) return [] finalResult: list[Tuple[Document, float]] = [] for document in query_response.results: metadata = {} assert isinstance( document, dict ), "document should be of type `dict[str,Any]`. But found: `{}`".format( type(document) ) for k, v in document.items(): if k == self._text_key: assert isinstance(v, str), ( "page content stored in column `{}` must be of type `str`. " "But found: `{}`" ).format(self._text_key, type(v)) page_content = v elif k == "dist": assert isinstance(v, float), ( "Computed distance between vectors must of type `float`. " "But found {}" ).format(type(v)) score = v elif k not in ["_id", "_event_time", "_meta"]: # These columns are populated by Rockset when documents are # inserted. No need to return them in metadata dict. metadata[k] = v finalResult.append( (Document(page_content=page_content, metadata=metadata), score) ) return finalResult def max_marginal_relevance_search( self, query: str, k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, *, where_str: Optional[str] = None, **kwargs: Any, ) -> List[Document]: """Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. fetch_k: Number of Documents to fetch to pass to MMR algorithm. distance_func (DistanceFunction): how to compute distance between two vectors in Rockset. lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. where_str: where clause for the sql query Returns: List of Documents selected by maximal marginal relevance. 
""" query_embedding = self._embeddings.embed_query(query) initial_docs = self.similarity_search_by_vector( query_embedding, k=fetch_k, where_str=where_str, exclude_embeddings=False, **kwargs, ) embeddings = [doc.metadata[self._embedding_key] for doc in initial_docs] selected_indices = maximal_marginal_relevance( np.array(query_embedding), embeddings, lambda_mult=lambda_mult, k=k, ) # remove embeddings key before returning for cleanup to be consistent with # other search functions for i in selected_indices: del initial_docs[i].metadata[self._embedding_key] return [initial_docs[i] for i in selected_indices] # Helper functions def _build_query_sql( self, query_embedding: List[float], distance_func: DistanceFunction, k: int = 4, where_str: Optional[str] = None, exclude_embeddings: bool = True, ) -> str: """Builds Rockset SQL query to query similar vectors to query_vector""" q_embedding_str = ",".join(map(str, query_embedding)) distance_str = f"""{distance_func.value}({self._embedding_key}, \ [{q_embedding_str}]) as dist""" where_str = f"WHERE {where_str}\n" if where_str else "" select_embedding = ( f" EXCEPT({self._embedding_key})," if exclude_embeddings else "," ) return f"""\ SELECT *{select_embedding} {distance_str} FROM {self._workspace}.{self._collection_name} {where_str}\ ORDER BY dist {distance_func.order_by()} LIMIT {str(k)} """ def _write_documents_to_rockset(self, batch: List[dict]) -> List[str]: add_doc_res = self._client.Documents.add_documents( collection=self._collection_name, data=batch, workspace=self._workspace ) return [doc_status._id for doc_status in add_doc_res.data] def delete_texts(self, ids: List[str]) -> None: """Delete a list of docs from the Rockset collection""" try: from rockset.models import DeleteDocumentsRequestData except ImportError: raise ImportError( "Could not import rockset client python package. " "Please install it with `pip install rockset`." ) self._client.Documents.delete_documents( collection=self._collection_name, data=[DeleteDocumentsRequestData(id=i) for i in ids], workspace=self._workspace, ) def delete(self, ids: Optional[List[str]] = None, **kwargs: Any) -> Optional[bool]: try: if ids is None: ids = [] self.delete_texts(ids) except Exception as e: logger.error("Exception when deleting docs from Rockset: %s\n", e) return False return True async def adelete( self, ids: Optional[List[str]] = None, **kwargs: Any ) -> Optional[bool]: return await run_in_executor(None, self.delete, ids, **kwargs)
0
lc_public_repos/langchain/libs/community/langchain_community
lc_public_repos/langchain/libs/community/langchain_community/vectorstores/vlite.py
from __future__ import annotations

# Standard library imports
from typing import Any, Dict, Iterable, List, Optional, Tuple
from uuid import uuid4

# LangChain imports
from langchain_core.documents import Document
from langchain_core.embeddings import Embeddings
from langchain_core.vectorstores import VectorStore


class VLite(VectorStore):
    """VLite is a simple and fast vector database for semantic search."""

    def __init__(
        self,
        embedding_function: Embeddings,
        collection: Optional[str] = None,
        **kwargs: Any,
    ):
        super().__init__()
        self.embedding_function = embedding_function
        self.collection = collection or f"vlite_{uuid4().hex}"
        # Third-party imports
        try:
            from vlite import VLite
        except ImportError:
            raise ImportError(
                "Could not import vlite python package. "
                "Please install it with `pip install vlite`."
            )
        self.vlite = VLite(collection=self.collection, **kwargs)

    def add_texts(
        self,
        texts: Iterable[str],
        metadatas: Optional[List[dict]] = None,
        **kwargs: Any,
    ) -> List[str]:
        """Run more texts through the embeddings and add to the vectorstore.

        Args:
            texts: Iterable of strings to add to the vectorstore.
            metadatas: Optional list of metadatas associated with the texts.
            kwargs: vectorstore specific parameters

        Returns:
            List of ids from adding the texts into the vectorstore.
        """
        texts = list(texts)
        ids = kwargs.pop("ids", [str(uuid4()) for _ in texts])
        embeddings = self.embedding_function.embed_documents(texts)
        if not metadatas:
            metadatas = [{} for _ in texts]
        data_points = [
            {"text": text, "metadata": metadata, "id": id, "embedding": embedding}
            for text, metadata, id, embedding in zip(texts, metadatas, ids, embeddings)
        ]
        results = self.vlite.add(data_points)
        return [result[0] for result in results]

    def add_documents(
        self,
        documents: List[Document],
        **kwargs: Any,
    ) -> List[str]:
        """Add a list of documents to the vectorstore.

        Args:
            documents: List of documents to add to the vectorstore.
            kwargs: vectorstore specific parameters such as "file_path" for
                processing directly with vlite.

        Returns:
            List of ids from adding the documents into the vectorstore.
        """
        ids = kwargs.pop("ids", [str(uuid4()) for _ in documents])
        texts = []
        metadatas = []
        for doc, id in zip(documents, ids):
            if "file_path" in kwargs:
                # Third-party imports
                try:
                    from vlite.utils import process_file
                except ImportError:
                    raise ImportError(
                        "Could not import vlite python package. "
                        "Please install it with `pip install vlite`."
                    )
                processed_data = process_file(kwargs["file_path"])
                texts.extend(processed_data)
                metadatas.extend([doc.metadata] * len(processed_data))
                ids.extend([f"{id}_{i}" for i in range(len(processed_data))])
            else:
                texts.append(doc.page_content)
                metadatas.append(doc.metadata)
        return self.add_texts(texts, metadatas, ids=ids)

    def similarity_search(
        self,
        query: str,
        k: int = 4,
        **kwargs: Any,
    ) -> List[Document]:
        """Return docs most similar to query.

        Args:
            query: Text to look up documents similar to.
            k: Number of Documents to return. Defaults to 4.

        Returns:
            List of Documents most similar to the query.
        """
        docs_and_scores = self.similarity_search_with_score(query, k=k)
        return [doc for doc, _ in docs_and_scores]

    def similarity_search_with_score(
        self,
        query: str,
        k: int = 4,
        filter: Optional[Dict[str, str]] = None,
        **kwargs: Any,
    ) -> List[Tuple[Document, float]]:
        """Return docs most similar to query.

        Args:
            query: Text to look up documents similar to.
            k: Number of Documents to return. Defaults to 4.
            filter: Filter by metadata. Defaults to None.

        Returns:
            List of Tuples of (doc, score), where score is the similarity score.
        """
        metadata = filter or {}
        embedding = self.embedding_function.embed_query(query)
        results = self.vlite.retrieve(
            text=query,
            top_k=k,
            metadata=metadata,
            return_scores=True,
            embedding=embedding,
        )
        documents_with_scores = [
            (Document(page_content=text, metadata=metadata), score)
            for text, score, metadata in results
        ]
        return documents_with_scores

    def update_document(self, document_id: str, document: Document) -> None:
        """Update an existing document in the vectorstore."""
        self.vlite.update(
            document_id, text=document.page_content, metadata=document.metadata
        )

    def get(self, ids: List[str]) -> List[Document]:
        """Get documents by their IDs."""
        results = self.vlite.get(ids)
        documents = [
            Document(page_content=text, metadata=metadata)
            for text, metadata in results
        ]
        return documents

    def delete(self, ids: Optional[List[str]] = None, **kwargs: Any) -> Optional[bool]:
        """Delete by ids."""
        if ids is not None:
            self.vlite.delete(ids, **kwargs)
            return True
        return None

    @classmethod
    def from_existing_index(
        cls,
        embedding: Embeddings,
        collection: str,
        **kwargs: Any,
    ) -> VLite:
        """Load an existing VLite index.

        Args:
            embedding: Embedding function
            collection: Name of the collection to load.

        Returns:
            VLite vector store.
        """
        vlite = cls(embedding_function=embedding, collection=collection, **kwargs)
        return vlite

    @classmethod
    def from_texts(
        cls,
        texts: List[str],
        embedding: Embeddings,
        metadatas: Optional[List[dict]] = None,
        collection: Optional[str] = None,
        **kwargs: Any,
    ) -> VLite:
        """Construct VLite wrapper from raw documents.

        This is a user-friendly interface that:
            1. Embeds documents.
            2. Adds the documents to the vectorstore.

        This is intended to be a quick way to get started.

        Example:
            .. code-block:: python

                from langchain import VLite
                from langchain.embeddings import OpenAIEmbeddings

                embeddings = OpenAIEmbeddings()
                vlite = VLite.from_texts(texts, embeddings)
        """
        vlite = cls(embedding_function=embedding, collection=collection, **kwargs)
        vlite.add_texts(texts, metadatas, **kwargs)
        return vlite

    @classmethod
    def from_documents(
        cls,
        documents: List[Document],
        embedding: Embeddings,
        collection: Optional[str] = None,
        **kwargs: Any,
    ) -> VLite:
        """Construct VLite wrapper from a list of documents.

        This is a user-friendly interface that:
            1. Embeds documents.
            2. Adds the documents to the vectorstore.

        This is intended to be a quick way to get started.

        Example:
            .. code-block:: python

                from langchain import VLite
                from langchain.embeddings import OpenAIEmbeddings

                embeddings = OpenAIEmbeddings()
                vlite = VLite.from_documents(documents, embeddings)
        """
        vlite = cls(embedding_function=embedding, collection=collection, **kwargs)
        vlite.add_documents(documents, **kwargs)
        return vlite
0
lc_public_repos/langchain/libs/community/langchain_community
lc_public_repos/langchain/libs/community/langchain_community/vectorstores/zep.py
from __future__ import annotations

import logging
import warnings
from dataclasses import asdict, dataclass
from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Tuple

from langchain_core.documents import Document
from langchain_core.embeddings import Embeddings
from langchain_core.vectorstores import VectorStore

if TYPE_CHECKING:
    from zep_python.document import Document as ZepDocument
    from zep_python.document import DocumentCollection

logger = logging.getLogger()


@dataclass
class CollectionConfig:
    """Configuration for a `Zep Collection`.

    If the collection does not exist, it will be created.

    Attributes:
        name (str): The name of the collection.
        description (Optional[str]): An optional description of the collection.
        metadata (Optional[Dict[str, Any]]): Optional metadata for the collection.
        embedding_dimensions (int): The number of dimensions for the embeddings in
            the collection. This should match the Zep server configuration
            if auto-embed is true.
        is_auto_embedded (bool): A flag indicating whether the collection is
            automatically embedded by Zep.
    """

    name: str
    description: Optional[str]
    metadata: Optional[Dict[str, Any]]
    embedding_dimensions: int
    is_auto_embedded: bool


class ZepVectorStore(VectorStore):
    """`Zep` vector store.

    It provides methods for adding texts or documents to the store,
    searching for similar documents, and deleting documents.

    Search scores are calculated using cosine similarity normalized to [0, 1].

    Args:
        api_url (str): The URL of the Zep API.
        collection_name (str): The name of the collection in the Zep store.
        api_key (Optional[str]): The API key for the Zep API.
        config (Optional[CollectionConfig]): The configuration for the collection.
            Required if the collection does not already exist.
        embedding (Optional[Embeddings]): Optional embedding function to use to
            embed the texts. Required if the collection is not auto-embedded.
    """

    def __init__(
        self,
        collection_name: str,
        api_url: str,
        *,
        api_key: Optional[str] = None,
        config: Optional[CollectionConfig] = None,
        embedding: Optional[Embeddings] = None,
    ) -> None:
        super().__init__()
        if not collection_name:
            raise ValueError(
                "collection_name must be specified when using ZepVectorStore."
            )
        try:
            from zep_python import ZepClient
        except ImportError:
            raise ImportError(
                "Could not import zep-python python package. "
                "Please install it with `pip install zep-python`."
            )
        self._client = ZepClient(api_url, api_key=api_key)

        self.collection_name = collection_name
        # If for some reason the collection name is not the same as the one in the
        # config, update it.
        if config and config.name != self.collection_name:
            config.name = self.collection_name

        self._collection_config = config
        self._collection = self._load_collection()
        self._embedding = embedding

        # self.add_texts(texts, metadatas=metadatas, **kwargs)

    @property
    def embeddings(self) -> Optional[Embeddings]:
        """Access the query embedding object if available."""
        return self._embedding

    def _load_collection(self) -> DocumentCollection:
        """
        Load the collection from the Zep backend.
        """
        from zep_python import NotFoundError

        try:
            collection = self._client.document.get_collection(self.collection_name)
        except NotFoundError:
            logger.info(
                f"Collection {self.collection_name} not found. "
                "Creating new collection."
            )
            collection = self._create_collection()

        return collection

    def _create_collection(self) -> DocumentCollection:
        """
        Create a new collection in the Zep backend.
        """
        if not self._collection_config:
            raise ValueError(
                "Collection config must be specified when creating a new collection."
            )
        collection = self._client.document.add_collection(
            **asdict(self._collection_config)
        )
        return collection

    def _generate_documents_to_add(
        self,
        texts: Iterable[str],
        metadatas: Optional[List[Dict[Any, Any]]] = None,
        document_ids: Optional[List[str]] = None,
    ) -> List[ZepDocument]:
        from zep_python.document import Document as ZepDocument

        embeddings = None
        if self._collection and self._collection.is_auto_embedded:
            if self._embedding is not None:
                warnings.warn(
                    """The collection is set to auto-embed and an embedding
                    function is present. Ignoring the embedding function.""",
                    stacklevel=2,
                )
        elif self._embedding is not None:
            embeddings = self._embedding.embed_documents(list(texts))
            if self._collection and self._collection.embedding_dimensions != len(
                embeddings[0]
            ):
                raise ValueError(
                    "The embedding dimensions of the collection and the embedding"
                    " function do not match. Collection dimensions:"
                    f" {self._collection.embedding_dimensions}, Embedding dimensions:"
                    f" {len(embeddings[0])}"
                )
        else:
            pass

        documents: List[ZepDocument] = []
        for i, d in enumerate(texts):
            documents.append(
                ZepDocument(
                    content=d,
                    metadata=metadatas[i] if metadatas else None,
                    document_id=document_ids[i] if document_ids else None,
                    embedding=embeddings[i] if embeddings else None,
                )
            )
        return documents

    def add_texts(
        self,
        texts: Iterable[str],
        metadatas: Optional[List[Dict[str, Any]]] = None,
        document_ids: Optional[List[str]] = None,
        **kwargs: Any,
    ) -> List[str]:
        """Run more texts through the embeddings and add to the vectorstore.

        Args:
            texts: Iterable of strings to add to the vectorstore.
            metadatas: Optional list of metadatas associated with the texts.
            document_ids: Optional list of document ids associated with the texts.
            kwargs: vectorstore specific parameters

        Returns:
            List of ids from adding the texts into the vectorstore.
        """
        if not self._collection:
            raise ValueError(
                "collection should be an instance of a Zep DocumentCollection"
            )

        documents = self._generate_documents_to_add(texts, metadatas, document_ids)
        uuids = self._collection.add_documents(documents)

        return uuids

    async def aadd_texts(
        self,
        texts: Iterable[str],
        metadatas: Optional[List[Dict[str, Any]]] = None,
        document_ids: Optional[List[str]] = None,
        **kwargs: Any,
    ) -> List[str]:
        """Run more texts through the embeddings and add to the vectorstore."""
        if not self._collection:
            raise ValueError(
                "collection should be an instance of a Zep DocumentCollection"
            )

        documents = self._generate_documents_to_add(texts, metadatas, document_ids)
        uuids = await self._collection.aadd_documents(documents)

        return uuids

    def search(
        self,
        query: str,
        search_type: str,
        metadata: Optional[Dict[str, Any]] = None,
        k: int = 3,
        **kwargs: Any,
    ) -> List[Document]:
        """Return docs most similar to query using specified search type."""
        if search_type == "similarity":
            return self.similarity_search(query, k=k, metadata=metadata, **kwargs)
        elif search_type == "mmr":
            return self.max_marginal_relevance_search(
                query, k=k, metadata=metadata, **kwargs
            )
        else:
            raise ValueError(
                f"search_type of {search_type} not allowed. Expected "
                "search_type to be 'similarity' or 'mmr'."
            )

    async def asearch(
        self,
        query: str,
        search_type: str,
        metadata: Optional[Dict[str, Any]] = None,
        k: int = 3,
        **kwargs: Any,
    ) -> List[Document]:
        """Return docs most similar to query using specified search type."""
        if search_type == "similarity":
            return await self.asimilarity_search(
                query, k=k, metadata=metadata, **kwargs
            )
        elif search_type == "mmr":
            return await self.amax_marginal_relevance_search(
                query, k=k, metadata=metadata, **kwargs
            )
        else:
            raise ValueError(
                f"search_type of {search_type} not allowed. Expected "
                "search_type to be 'similarity' or 'mmr'."
            )

    def similarity_search(
        self,
        query: str,
        k: int = 4,
        metadata: Optional[Dict[str, Any]] = None,
        **kwargs: Any,
    ) -> List[Document]:
        """Return docs most similar to query."""
        results = self._similarity_search_with_relevance_scores(
            query, k=k, metadata=metadata, **kwargs
        )
        return [doc for doc, _ in results]

    def similarity_search_with_score(
        self,
        query: str,
        k: int = 4,
        metadata: Optional[Dict[str, Any]] = None,
        **kwargs: Any,
    ) -> List[Tuple[Document, float]]:
        """Run similarity search with distance."""
        return self._similarity_search_with_relevance_scores(
            query, k=k, metadata=metadata, **kwargs
        )

    def _similarity_search_with_relevance_scores(
        self,
        query: str,
        k: int = 4,
        metadata: Optional[Dict[str, Any]] = None,
        **kwargs: Any,
    ) -> List[Tuple[Document, float]]:
        """
        Default similarity search with relevance scores. Modify if necessary
        in subclass.
        Return docs and relevance scores in the range [0, 1].

        0 is dissimilar, 1 is most similar.

        Args:
            query: input text
            k: Number of Documents to return. Defaults to 4.
            metadata: Optional, metadata filter
            **kwargs: kwargs to be passed to similarity search. Should include:
                score_threshold: Optional, a floating point value between 0 and 1
                    used to filter the resulting set of retrieved docs

        Returns:
            List of Tuples of (doc, similarity_score)
        """
        if not self._collection:
            raise ValueError(
                "collection should be an instance of a Zep DocumentCollection"
            )

        if not self._collection.is_auto_embedded and self._embedding:
            query_vector = self._embedding.embed_query(query)
            results = self._collection.search(
                embedding=query_vector, limit=k, metadata=metadata, **kwargs
            )
        else:
            results = self._collection.search(
                query, limit=k, metadata=metadata, **kwargs
            )

        return [
            (
                Document(
                    page_content=doc.content,
                    metadata=doc.metadata,
                ),
                doc.score or 0.0,
            )
            for doc in results
        ]

    async def asimilarity_search_with_relevance_scores(
        self,
        query: str,
        k: int = 4,
        metadata: Optional[Dict[str, Any]] = None,
        **kwargs: Any,
    ) -> List[Tuple[Document, float]]:
        """Return docs most similar to query."""
        if not self._collection:
            raise ValueError(
                "collection should be an instance of a Zep DocumentCollection"
            )

        if not self._collection.is_auto_embedded and self._embedding:
            query_vector = self._embedding.embed_query(query)
            results = await self._collection.asearch(
                embedding=query_vector, limit=k, metadata=metadata, **kwargs
            )
        else:
            results = await self._collection.asearch(
                query, limit=k, metadata=metadata, **kwargs
            )

        return [
            (
                Document(
                    page_content=doc.content,
                    metadata=doc.metadata,
                ),
                doc.score or 0.0,
            )
            for doc in results
        ]

    async def asimilarity_search(
        self,
        query: str,
        k: int = 4,
        metadata: Optional[Dict[str, Any]] = None,
        **kwargs: Any,
    ) -> List[Document]:
        """Return docs most similar to query."""
        results = await self.asimilarity_search_with_relevance_scores(
            query, k, metadata=metadata, **kwargs
        )

        return [doc for doc, _ in results]

    def similarity_search_by_vector(
        self,
        embedding: List[float],
        k: int = 4,
        metadata: Optional[Dict[str, Any]] = None,
        **kwargs: Any,
    ) -> List[Document]:
        """Return docs most similar to embedding vector.

        Args:
            embedding: Embedding to look up documents similar to.
            k: Number of Documents to return. Defaults to 4.
            metadata: Optional, metadata filter

        Returns:
            List of Documents most similar to the query vector.
        """
        if not self._collection:
            raise ValueError(
                "collection should be an instance of a Zep DocumentCollection"
            )

        results = self._collection.search(
            embedding=embedding, limit=k, metadata=metadata, **kwargs
        )

        return [
            Document(
                page_content=doc.content,
                metadata=doc.metadata,
            )
            for doc in results
        ]

    async def asimilarity_search_by_vector(
        self,
        embedding: List[float],
        k: int = 4,
        metadata: Optional[Dict[str, Any]] = None,
        **kwargs: Any,
    ) -> List[Document]:
        """Return docs most similar to embedding vector."""
        if not self._collection:
            raise ValueError(
                "collection should be an instance of a Zep DocumentCollection"
            )

        results = await self._collection.asearch(
            embedding=embedding, limit=k, metadata=metadata, **kwargs
        )

        return [
            Document(
                page_content=doc.content,
                metadata=doc.metadata,
            )
            for doc in results
        ]

    def max_marginal_relevance_search(
        self,
        query: str,
        k: int = 4,
        fetch_k: int = 20,
        lambda_mult: float = 0.5,
        metadata: Optional[Dict[str, Any]] = None,
        **kwargs: Any,
    ) -> List[Document]:
        """Return docs selected using the maximal marginal relevance.

        Maximal marginal relevance optimizes for similarity to query AND
        diversity among selected documents.

        Args:
            query: Text to look up documents similar to.
            k: Number of Documents to return. Defaults to 4.
            fetch_k: Number of Documents to fetch to pass to MMR algorithm.
                Zep determines this automatically and this parameter is
                ignored.
            lambda_mult: Number between 0 and 1 that determines the degree
                of diversity among the results with 0 corresponding
                to maximum diversity and 1 to minimum diversity.
                Defaults to 0.5.
            metadata: Optional, metadata to filter the resulting set of retrieved docs

        Returns:
            List of Documents selected by maximal marginal relevance.
        """
        if not self._collection:
            raise ValueError(
                "collection should be an instance of a Zep DocumentCollection"
            )

        if not self._collection.is_auto_embedded and self._embedding:
            query_vector = self._embedding.embed_query(query)
            results = self._collection.search(
                embedding=query_vector,
                limit=k,
                metadata=metadata,
                search_type="mmr",
                mmr_lambda=lambda_mult,
                **kwargs,
            )
        else:
            results, query_vector = self._collection.search_return_query_vector(
                query,
                limit=k,
                metadata=metadata,
                search_type="mmr",
                mmr_lambda=lambda_mult,
                **kwargs,
            )

        return [Document(page_content=d.content, metadata=d.metadata) for d in results]

    async def amax_marginal_relevance_search(
        self,
        query: str,
        k: int = 4,
        fetch_k: int = 20,
        lambda_mult: float = 0.5,
        metadata: Optional[Dict[str, Any]] = None,
        **kwargs: Any,
    ) -> List[Document]:
        """Return docs selected using the maximal marginal relevance."""
        if not self._collection:
            raise ValueError(
                "collection should be an instance of a Zep DocumentCollection"
            )

        if not self._collection.is_auto_embedded and self._embedding:
            query_vector = self._embedding.embed_query(query)
            results = await self._collection.asearch(
                embedding=query_vector,
                limit=k,
                metadata=metadata,
                search_type="mmr",
                mmr_lambda=lambda_mult,
                **kwargs,
            )
        else:
            results, query_vector = await self._collection.asearch_return_query_vector(
                query,
                limit=k,
                metadata=metadata,
                search_type="mmr",
                mmr_lambda=lambda_mult,
                **kwargs,
            )

        return [Document(page_content=d.content, metadata=d.metadata) for d in results]

    def max_marginal_relevance_search_by_vector(
        self,
        embedding: List[float],
        k: int = 4,
        fetch_k: int = 20,
        lambda_mult: float = 0.5,
        metadata: Optional[Dict[str, Any]] = None,
        **kwargs: Any,
    ) -> List[Document]:
        """Return docs selected using the maximal marginal relevance.

        Maximal marginal relevance optimizes for similarity to query AND
        diversity among selected documents.

        Args:
            embedding: Embedding to look up documents similar to.
            k: Number of Documents to return. Defaults to 4.
            fetch_k: Number of Documents to fetch to pass to MMR algorithm.
                Zep determines this automatically and this parameter is
                ignored.
            lambda_mult: Number between 0 and 1 that determines the degree
                of diversity among the results with 0 corresponding
                to maximum diversity and 1 to minimum diversity.
                Defaults to 0.5.
            metadata: Optional, metadata to filter the resulting set of retrieved docs

        Returns:
            List of Documents selected by maximal marginal relevance.
        """
        if not self._collection:
            raise ValueError(
                "collection should be an instance of a Zep DocumentCollection"
            )

        results = self._collection.search(
            embedding=embedding,
            limit=k,
            metadata=metadata,
            search_type="mmr",
            mmr_lambda=lambda_mult,
            **kwargs,
        )

        return [Document(page_content=d.content, metadata=d.metadata) for d in results]

    async def amax_marginal_relevance_search_by_vector(
        self,
        embedding: List[float],
        k: int = 4,
        fetch_k: int = 20,
        lambda_mult: float = 0.5,
        metadata: Optional[Dict[str, Any]] = None,
        **kwargs: Any,
    ) -> List[Document]:
        """Return docs selected using the maximal marginal relevance."""
        if not self._collection:
            raise ValueError(
                "collection should be an instance of a Zep DocumentCollection"
            )

        results = await self._collection.asearch(
            embedding=embedding,
            limit=k,
            metadata=metadata,
            search_type="mmr",
            mmr_lambda=lambda_mult,
            **kwargs,
        )

        return [Document(page_content=d.content, metadata=d.metadata) for d in results]

    @classmethod
    def from_texts(
        cls,
        texts: List[str],
        embedding: Optional[Embeddings] = None,
        metadatas: Optional[List[dict]] = None,
        collection_name: str = "",
        api_url: str = "",
        api_key: Optional[str] = None,
        config: Optional[CollectionConfig] = None,
        **kwargs: Any,
    ) -> ZepVectorStore:
        """
        Class method that returns a ZepVectorStore instance initialized from texts.

        If the collection does not exist, it will be created.

        Args:
            texts (List[str]): The list of texts to add to the vectorstore.
            embedding (Optional[Embeddings]): Optional embedding function to use to
                embed the texts.
            metadatas (Optional[List[Dict[str, Any]]]): Optional list of metadata
                associated with the texts.
            collection_name (str): The name of the collection in the Zep store.
            api_url (str): The URL of the Zep API.
            api_key (Optional[str]): The API key for the Zep API.
            config (Optional[CollectionConfig]): The configuration for the collection.
            kwargs: Additional parameters specific to the vectorstore.

        Returns:
            ZepVectorStore: An instance of ZepVectorStore.
        """
        vecstore = cls(
            collection_name,
            api_url,
            api_key=api_key,
            config=config,
            embedding=embedding,
        )
        vecstore.add_texts(texts, metadatas)
        return vecstore

    def delete(self, ids: Optional[List[str]] = None, **kwargs: Any) -> None:
        """Delete by Zep vector UUIDs.

        Parameters
        ----------
        ids : Optional[List[str]]
            The UUIDs of the vectors to delete.

        Raises
        ------
        ValueError
            If no UUIDs are provided.
        """
        if ids is None or len(ids) == 0:
            raise ValueError("No uuids provided to delete.")

        if self._collection is None:
            raise ValueError("No collection name provided.")

        for u in ids:
            self._collection.delete_document(u)
0
lc_public_repos/langchain/libs/community/langchain_community
lc_public_repos/langchain/libs/community/langchain_community/vectorstores/vespa.py
from __future__ import annotations

from typing import Any, Dict, Iterable, List, Optional, Tuple, Type, Union

from langchain_core.documents import Document
from langchain_core.embeddings import Embeddings
from langchain_core.vectorstores import VectorStore, VectorStoreRetriever


class VespaStore(VectorStore):
    """
    `Vespa` vector store.

    To use, you should have the python client library ``pyvespa`` installed.

    Example:
        .. code-block:: python

            from langchain_community.vectorstores import VespaStore
            from langchain_community.embeddings.openai import OpenAIEmbeddings
            from vespa.application import Vespa

            # Create a vespa client dependent upon your application,
            # e.g. either connecting to Vespa Cloud or a local deployment
            # such as Docker. Please refer to the PyVespa documentation on
            # how to initialize the client.

            vespa_app = Vespa(url="...", port=..., application_package=...)

            # You need to instruct LangChain on which fields to use for embeddings

            vespa_config = dict(
                page_content_field="text",
                embedding_field="embedding",
                input_field="query_embedding",
                metadata_fields=["date", "rating", "author"]
            )

            embedding_function = OpenAIEmbeddings()

            vectorstore = VespaStore(vespa_app, embedding_function, **vespa_config)

    """

    def __init__(
        self,
        app: Any,
        embedding_function: Optional[Embeddings] = None,
        page_content_field: Optional[str] = None,
        embedding_field: Optional[str] = None,
        input_field: Optional[str] = None,
        metadata_fields: Optional[List[str]] = None,
    ) -> None:
        """
        Initialize with a PyVespa client.
        """
        try:
            from vespa.application import Vespa
        except ImportError:
            raise ImportError(
                "Could not import Vespa python package. "
                "Please install it with `pip install pyvespa`."
            )
        if not isinstance(app, Vespa):
            raise ValueError(
                f"app should be an instance of vespa.application.Vespa, got {type(app)}"
            )
        self._vespa_app = app
        self._embedding_function = embedding_function
        self._page_content_field = page_content_field
        self._embedding_field = embedding_field
        self._input_field = input_field
        self._metadata_fields = metadata_fields

    def add_texts(
        self,
        texts: Iterable[str],
        metadatas: Optional[List[dict]] = None,
        ids: Optional[List[str]] = None,
        **kwargs: Any,
    ) -> List[str]:
        """
        Add texts to the vectorstore.

        Args:
            texts: Iterable of strings to add to the vectorstore.
            metadatas: Optional list of metadatas associated with the texts.
            ids: Optional list of ids associated with the texts.
            kwargs: vectorstore specific parameters

        Returns:
            List of ids from adding the texts into the vectorstore.
        """
        embeddings = None
        if self._embedding_function is not None:
            embeddings = self._embedding_function.embed_documents(list(texts))

        if ids is None:
            ids = [str(f"{i+1}") for i, _ in enumerate(texts)]

        batch = []
        for i, text in enumerate(texts):
            fields: Dict[str, Union[str, List[float]]] = {}
            if self._page_content_field is not None:
                fields[self._page_content_field] = text
            if self._embedding_field is not None and embeddings is not None:
                fields[self._embedding_field] = embeddings[i]
            if metadatas is not None and self._metadata_fields is not None:
                for metadata_field in self._metadata_fields:
                    if metadata_field in metadatas[i]:
                        fields[metadata_field] = metadatas[i][metadata_field]
            batch.append({"id": ids[i], "fields": fields})

        results = self._vespa_app.feed_batch(batch)
        for result in results:
            if not (str(result.status_code).startswith("2")):
                raise RuntimeError(
                    f"Could not add document to Vespa. "
                    f"Error code: {result.status_code}. "
                    f"Message: {result.json['message']}"
                )

        return ids

    def delete(self, ids: Optional[List[str]] = None, **kwargs: Any) -> Optional[bool]:
        if ids is None:
            return False
        batch = [{"id": id} for id in ids]
        result = self._vespa_app.delete_batch(batch)
        return sum([0 if r.status_code == 200 else 1 for r in result]) == 0

    def _create_query(
        self, query_embedding: List[float], k: int = 4, **kwargs: Any
    ) -> Dict:
        hits = k
        doc_embedding_field = self._embedding_field
        input_embedding_field = self._input_field
        ranking_function = kwargs["ranking"] if "ranking" in kwargs else "default"
        filter = kwargs["filter"] if "filter" in kwargs else None

        approximate = kwargs["approximate"] if "approximate" in kwargs else False
        approximate = "true" if approximate else "false"

        yql = "select * from sources * where "
        yql += f"{{targetHits: {hits}, approximate: {approximate}}}"
        yql += f"nearestNeighbor({doc_embedding_field}, {input_embedding_field})"
        if filter is not None:
            yql += f" and {filter}"

        query = {
            "yql": yql,
            f"input.query({input_embedding_field})": query_embedding,
            "ranking": ranking_function,
            "hits": hits,
        }
        return query

    def similarity_search_by_vector_with_score(
        self, query_embedding: List[float], k: int = 4, **kwargs: Any
    ) -> List[Tuple[Document, float]]:
        """
        Performs similarity search from an embedding vector.

        Args:
            query_embedding: Embedding vector to search for.
            k: Number of results to return.
            custom_query: Use this custom query instead of the default query (kwargs)
            kwargs: other vector store specific parameters

        Returns:
            List of Documents most similar to the query vector, with a score
            for each.
        """
        if "custom_query" in kwargs:
            query = kwargs["custom_query"]
        else:
            query = self._create_query(query_embedding, k, **kwargs)
        try:
            response = self._vespa_app.query(body=query)
        except Exception as e:
            raise RuntimeError(
                f"Could not retrieve data from Vespa: "
                f"{e.args[0][0]['summary']}. "
                f"Error: {e.args[0][0]['message']}"
            )

        if not str(response.status_code).startswith("2"):
            raise RuntimeError(
                f"Could not retrieve data from Vespa. "
                f"Error code: {response.status_code}. "
                f"Message: {response.json['message']}"
            )

        root = response.json["root"]
        if "errors" in root:
            import json

            raise RuntimeError(json.dumps(root["errors"]))

        if response is None or response.hits is None:
            return []

        docs = []
        for child in response.hits:
            page_content = child["fields"][self._page_content_field]
            score = child["relevance"]
            metadata = {"id": child["id"]}
            if self._metadata_fields is not None:
                for field in self._metadata_fields:
                    metadata[field] = child["fields"].get(field)
            doc = Document(page_content=page_content, metadata=metadata)
            docs.append((doc, score))
        return docs

    def similarity_search_by_vector(
        self, embedding: List[float], k: int = 4, **kwargs: Any
    ) -> List[Document]:
        results = self.similarity_search_by_vector_with_score(embedding, k, **kwargs)
        return [r[0] for r in results]

    def similarity_search_with_score(
        self, query: str, k: int = 4, **kwargs: Any
    ) -> List[Tuple[Document, float]]:
        query_emb = []
        if self._embedding_function is not None:
            query_emb = self._embedding_function.embed_query(query)
        return self.similarity_search_by_vector_with_score(query_emb, k, **kwargs)

    def similarity_search(
        self, query: str, k: int = 4, **kwargs: Any
    ) -> List[Document]:
        results = self.similarity_search_with_score(query, k, **kwargs)
        return [r[0] for r in results]

    def max_marginal_relevance_search(
        self,
        query: str,
        k: int = 4,
        fetch_k: int = 20,
        lambda_mult: float = 0.5,
        **kwargs: Any,
    ) -> List[Document]:
        raise NotImplementedError("MMR search not implemented")

    def max_marginal_relevance_search_by_vector(
        self,
        embedding: List[float],
        k: int = 4,
        fetch_k: int = 20,
        lambda_mult: float = 0.5,
        **kwargs: Any,
    ) -> List[Document]:
        raise NotImplementedError("MMR search by vector not implemented")

    @classmethod
    def from_texts(
        cls: Type[VespaStore],
        texts: List[str],
        embedding: Embeddings,
        metadatas: Optional[List[dict]] = None,
        ids: Optional[List[str]] = None,
        **kwargs: Any,
    ) -> VespaStore:
        vespa = cls(embedding_function=embedding, **kwargs)
        vespa.add_texts(texts=texts, metadatas=metadatas, ids=ids)
        return vespa

    def as_retriever(self, **kwargs: Any) -> VectorStoreRetriever:
        return super().as_retriever(**kwargs)
0
lc_public_repos/langchain/libs/community/langchain_community
lc_public_repos/langchain/libs/community/langchain_community/vectorstores/documentdb.py
from __future__ import annotations

import logging
from enum import Enum
from typing import (
    TYPE_CHECKING,
    Any,
    Dict,
    Generator,
    Iterable,
    List,
    Optional,
    TypeVar,
    Union,
)

from langchain_core.documents import Document
from langchain_core.vectorstores import VectorStore

if TYPE_CHECKING:
    from langchain_core.embeddings import Embeddings
    from pymongo.collection import Collection


# Before Python 3.11 native StrEnum is not available
class DocumentDBSimilarityType(str, Enum):
    """DocumentDB Similarity Type as enumerator."""

    COS = "cosine"
    """Cosine similarity"""
    DOT = "dotProduct"
    """Dot product"""
    EUC = "euclidean"
    """Euclidean distance"""


DocumentDBDocumentType = TypeVar("DocumentDBDocumentType", bound=Dict[str, Any])

logger = logging.getLogger(__name__)

DEFAULT_INSERT_BATCH_SIZE = 128


class DocumentDBVectorSearch(VectorStore):
    """`Amazon DocumentDB (with MongoDB compatibility)` vector store.
    Please refer to the official Vector Search documentation for more details:
    https://docs.aws.amazon.com/documentdb/latest/developerguide/vector-search.html

    To use, you should have both:
    - the ``pymongo`` python package installed
    - a connection string and credentials associated with a DocumentDB cluster

    Example:
        .. code-block:: python

            from langchain_community.vectorstores import DocumentDBVectorSearch
            from langchain_community.embeddings.openai import OpenAIEmbeddings
            from pymongo import MongoClient

            mongo_client = MongoClient("<YOUR-CONNECTION-STRING>")
            collection = mongo_client["<db_name>"]["<collection_name>"]
            embeddings = OpenAIEmbeddings()
            vectorstore = DocumentDBVectorSearch(collection, embeddings)
    """

    def __init__(
        self,
        collection: Collection[DocumentDBDocumentType],
        embedding: Embeddings,
        *,
        index_name: str = "vectorSearchIndex",
        text_key: str = "textContent",
        embedding_key: str = "vectorContent",
    ):
        """Constructor for DocumentDBVectorSearch

        Args:
            collection: MongoDB collection to add the texts to.
            embedding: Text embedding model to use.
            index_name: Name of the Vector Search index.
            text_key: MongoDB field that will contain the text
                for each document.
            embedding_key: MongoDB field that will contain the embedding
                for each document.
        """
        self._collection = collection
        self._embedding = embedding
        self._index_name = index_name
        self._text_key = text_key
        self._embedding_key = embedding_key
        self._similarity_type = DocumentDBSimilarityType.COS

    @property
    def embeddings(self) -> Embeddings:
        return self._embedding

    def get_index_name(self) -> str:
        """Returns the index name

        Returns:
            Returns the index name
        """
        return self._index_name

    @classmethod
    def from_connection_string(
        cls,
        connection_string: str,
        namespace: str,
        embedding: Embeddings,
        **kwargs: Any,
    ) -> DocumentDBVectorSearch:
        """Creates an Instance of DocumentDBVectorSearch from a Connection String

        Args:
            connection_string: The DocumentDB cluster endpoint connection string
            namespace: The namespace (database.collection)
            embedding: The embedding utility
            **kwargs: Dynamic keyword arguments

        Returns:
            an instance of the vector store
        """
        try:
            from pymongo import MongoClient
        except ImportError:
            raise ImportError(
                "Could not import pymongo, please install it with "
                "`pip install pymongo`."
            )
        client: MongoClient = MongoClient(connection_string)
        db_name, collection_name = namespace.split(".")
        collection = client[db_name][collection_name]
        return cls(collection, embedding, **kwargs)

    def index_exists(self) -> bool:
        """Verifies if the specified index name during instance
            construction exists on the collection

        Returns:
            Returns True on success and False if no such index exists
            on the collection
        """
        cursor = self._collection.list_indexes()
        index_name = self._index_name

        for res in cursor:
            current_index_name = res.pop("name")
            if current_index_name == index_name:
                return True

        return False

    def delete_index(self) -> None:
        """Deletes the index specified during instance construction if it exists"""
        if self.index_exists():
            self._collection.drop_index(self._index_name)
            # Raises OperationFailure on an error (e.g. trying to drop
            # an index that does not exist)

    def create_index(
        self,
        dimensions: int = 1536,
        similarity: DocumentDBSimilarityType = DocumentDBSimilarityType.COS,
        m: int = 16,
        ef_construction: int = 64,
    ) -> dict[str, Any]:
        """Creates an index using the index name specified at
            instance construction

        Args:
            dimensions: Number of dimensions for vector similarity.
                The maximum number of supported dimensions is 2000

            similarity: Similarity algorithm to use with the HNSW index.
                Possible options are:
                    - DocumentDBSimilarityType.COS (cosine distance),
                    - DocumentDBSimilarityType.EUC (Euclidean distance), and
                    - DocumentDBSimilarityType.DOT (dot product).

            m: Specifies the max number of connections for an HNSW index.
                Large impact on memory consumption.

            ef_construction: Specifies the size of the dynamic candidate list
                for constructing the graph for HNSW index. Higher values lead
                to more accurate results but slower indexing speed.

        Returns:
            An object describing the created index
        """
        self._similarity_type = similarity

        # prepare the command
        create_index_commands = {
            "createIndexes": self._collection.name,
            "indexes": [
                {
                    "name": self._index_name,
                    "key": {self._embedding_key: "vector"},
                    "vectorOptions": {
                        "type": "hnsw",
                        "similarity": similarity,
                        "dimensions": dimensions,
                        "m": m,
                        "efConstruction": ef_construction,
                    },
                }
            ],
        }

        # retrieve the database object
        current_database = self._collection.database

        # invoke the command from the database object
        create_index_responses: dict[str, Any] = current_database.command(
            create_index_commands
        )

        return create_index_responses

    def add_texts(
        self,
        texts: Iterable[str],
        metadatas: Optional[List[Dict[str, Any]]] = None,
        **kwargs: Any,
    ) -> List:
        batch_size = kwargs.get("batch_size", DEFAULT_INSERT_BATCH_SIZE)
        _metadatas: Union[List, Generator] = metadatas or ({} for _ in texts)

        texts_batch = []
        metadatas_batch = []
        result_ids = []
        for i, (text, metadata) in enumerate(zip(texts, _metadatas)):
            texts_batch.append(text)
            metadatas_batch.append(metadata)
            if (i + 1) % batch_size == 0:
                result_ids.extend(self._insert_texts(texts_batch, metadatas_batch))
                texts_batch = []
                metadatas_batch = []
        if texts_batch:
            result_ids.extend(self._insert_texts(texts_batch, metadatas_batch))
        return result_ids

    def _insert_texts(self, texts: List[str], metadatas: List[Dict[str, Any]]) -> List:
        """Used to Load Documents into the collection

        Args:
            texts: The list of documents strings to load
            metadatas: The list of metadata objects associated with each document

        Returns:
            The list of ids of the inserted documents
        """
        # If the text is empty, then exit early
        if not texts:
            return []

        # Embed and create the documents
        embeddings = self._embedding.embed_documents(texts)
        to_insert = [
            {self._text_key: t, self._embedding_key: embedding, **m}
            for t, m, embedding in zip(texts, metadatas, embeddings)
        ]
        # insert the documents in DocumentDB
        insert_result = self._collection.insert_many(to_insert)  # type: ignore
        return insert_result.inserted_ids

    @classmethod
    def from_texts(
        cls,
        texts: List[str],
        embedding: Embeddings,
        metadatas: Optional[List[dict]] = None,
        collection: Optional[Collection[DocumentDBDocumentType]] = None,
        **kwargs: Any,
    ) -> DocumentDBVectorSearch:
        if collection is None:
            raise ValueError("Must provide 'collection' named parameter.")
        vectorstore = cls(collection, embedding, **kwargs)
        vectorstore.add_texts(texts, metadatas=metadatas)
        return vectorstore

    def delete(self, ids: Optional[List[str]] = None, **kwargs: Any) -> Optional[bool]:
        if ids is None:
            raise ValueError("No document ids provided to delete.")

        for document_id in ids:
            self.delete_document_by_id(document_id)
        return True

    def delete_document_by_id(self, document_id: Optional[str] = None) -> None:
        """Removes a Specific Document by Id

        Args:
            document_id: The document identifier
        """
        try:
            from bson.objectid import ObjectId
        except ImportError as e:
            raise ImportError(
                "Unable to import bson, please install with `pip install bson`."
            ) from e
        if document_id is None:
            raise ValueError("No document id provided to delete.")

        self._collection.delete_one({"_id": ObjectId(document_id)})

    def _similarity_search_without_score(
        self,
        embeddings: List[float],
        k: int = 4,
        ef_search: int = 40,
        filter: Optional[Dict[str, Any]] = None,
    ) -> List[Document]:
        """Returns a list of documents.

        Args:
            embeddings: The query vector
            k: the number of documents to return
            ef_search: Specifies the size of the dynamic candidate list
                that HNSW index uses during search. A higher value of
                efSearch provides better recall at cost of speed.
            filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.

        Returns:
            A list of documents closest to the query vector
        """
        # $match can't be null, so initializes to {} when None to avoid
        # "the match filter must be an expression in an object"
        if not filter:
            filter = {}
        pipeline: List[dict[str, Any]] = [
            {"$match": filter},
            {
                "$search": {
                    "vectorSearch": {
                        "vector": embeddings,
                        "path": self._embedding_key,
                        "similarity": self._similarity_type,
                        "k": k,
                        "efSearch": ef_search,
                    }
                },
            },
        ]

        cursor = self._collection.aggregate(pipeline)

        docs = []

        for res in cursor:
            text = res.pop(self._text_key)
            docs.append(Document(page_content=text, metadata=res))

        return docs

    def similarity_search(
        self,
        query: str,
        k: int = 4,
        ef_search: int = 40,
        *,
        filter: Optional[Dict[str, Any]] = None,
        **kwargs: Any,
    ) -> List[Document]:
        embeddings = self._embedding.embed_query(query)
        docs = self._similarity_search_without_score(
            embeddings=embeddings, k=k, ef_search=ef_search, filter=filter
        )
        return [doc for doc in docs]
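

# A minimal usage sketch of the store defined above, assuming a reachable
# DocumentDB cluster. The connection string, namespace, and dimensions are
# placeholders; an HNSW index is created before the first search.
def _example_documentdb_usage() -> None:
    from langchain_community.embeddings import FakeEmbeddings

    store = DocumentDBVectorSearch.from_connection_string(
        connection_string="mongodb://user:pass@localhost:27017",  # placeholder
        namespace="demo_db.demo_collection",  # placeholder database.collection
        embedding=FakeEmbeddings(size=1536),  # stand-in for a real model
    )
    if not store.index_exists():
        store.create_index(
            dimensions=1536,
            similarity=DocumentDBSimilarityType.COS,
        )
    store.add_texts(["DocumentDB supports vector search."])
    docs = store.similarity_search("vector search", k=1, ef_search=40)
    print(docs)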
0
lc_public_repos/langchain/libs/community/langchain_community
lc_public_repos/langchain/libs/community/langchain_community/vectorstores/baiducloud_vector_search.py
import logging
import uuid
from typing import (
    TYPE_CHECKING,
    Any,
    Callable,
    Dict,
    Iterable,
    List,
    Optional,
    Tuple,
    Union,
)

from langchain_core.documents import Document
from langchain_core.embeddings import Embeddings
from langchain_core.vectorstores import VectorStore

if TYPE_CHECKING:
    from elasticsearch import Elasticsearch

logger = logging.getLogger(__name__)


class BESVectorStore(VectorStore):
    """`Baidu Elasticsearch` vector store.

    Example:
        .. code-block:: python

            from langchain_community.vectorstores import BESVectorStore
            from langchain_community.embeddings.openai import OpenAIEmbeddings

            embeddings = OpenAIEmbeddings()
            vectorstore = BESVectorStore(
                embedding=OpenAIEmbeddings(),
                index_name="langchain-demo",
                bes_url="http://localhost:9200"
            )

    Args:
        index_name: Name of the Elasticsearch index to create.
        bes_url: URL of the Baidu Elasticsearch instance to connect to.
        user: Username to use when connecting to Elasticsearch.
        password: Password to use when connecting to Elasticsearch.

    More information can be obtained from:
    https://cloud.baidu.com/doc/BES/s/8llyn0hh4
    """

    def __init__(
        self,
        index_name: str,
        bes_url: str,
        user: Optional[str] = None,
        password: Optional[str] = None,
        embedding: Optional[Embeddings] = None,
        **kwargs: Optional[dict],
    ) -> None:
        self.embedding = embedding
        self.index_name = index_name
        self.query_field = kwargs.get("query_field", "text")
        self.vector_query_field = kwargs.get("vector_query_field", "vector")
        self.space_type = kwargs.get("space_type", "cosine")
        self.index_type = kwargs.get("index_type", "linear")
        self.index_params = kwargs.get("index_params") or {}

        if bes_url is not None:
            self.client = BESVectorStore.bes_client(
                bes_url=bes_url, username=user, password=password
            )
        else:
            raise ValueError("Please specify a BES connection URL.")

    @property
    def embeddings(self) -> Optional[Embeddings]:
        return self.embedding

    @staticmethod
    def bes_client(
        *,
        bes_url: Optional[str] = None,
        username: Optional[str] = None,
        password: Optional[str] = None,
    ) -> "Elasticsearch":
        try:
            import elasticsearch
        except ImportError:
            raise ImportError(
                "Could not import elasticsearch python package. "
                "Please install it with `pip install elasticsearch`."
            )

        connection_params: Dict[str, Any] = {}

        connection_params["hosts"] = [bes_url]
        if username and password:
            connection_params["basic_auth"] = (username, password)

        es_client = elasticsearch.Elasticsearch(**connection_params)
        try:
            es_client.info()
        except Exception as e:
            logger.error(f"Error connecting to Elasticsearch: {e}")
            raise e

        return es_client

    def _create_index_if_not_exists(self, dims_length: Optional[int] = None) -> None:
        """Create the index if it doesn't already exist.

        Args:
            dims_length: Length of the embedding vectors.
        """
        if self.client.indices.exists(index=self.index_name):
            logger.info(f"Index {self.index_name} already exists. Skipping creation.")
        else:
            if dims_length is None:
                raise ValueError(
                    "Cannot create index without specifying dims_length "
                    + "when the index doesn't already exist. "
                )

            indexMapping = self._index_mapping(dims_length=dims_length)

            logger.debug(
                f"Creating index {self.index_name} with mappings {indexMapping}"
            )

            self.client.indices.create(
                index=self.index_name,
                body={
                    "settings": {"index": {"knn": True}},
                    "mappings": {"properties": indexMapping},
                },
            )

    def _index_mapping(self, dims_length: Union[int, None]) -> Dict:
        """
        Executes when the index is created.

        Args:
            dims_length: Numeric length of the embedding vectors,
                or None if not using vector-based query.
            index_params: The extra parameters for creating the index.

        Returns:
            Dict: The Elasticsearch settings and mappings for the strategy.
        """
        if "linear" == self.index_type:
            return {
                self.vector_query_field: {
                    "type": "bpack_vector",
                    "dims": dims_length,
                    "build_index": self.index_params.get("build_index", False),
                }
            }

        elif "hnsw" == self.index_type:
            return {
                self.vector_query_field: {
                    "type": "bpack_vector",
                    "dims": dims_length,
                    "index_type": "hnsw",
                    "space_type": self.space_type,
                    "parameters": {
                        "ef_construction": self.index_params.get(
                            "hnsw_ef_construction", 200
                        ),
                        "m": self.index_params.get("hnsw_m", 4),
                    },
                }
            }
        else:
            return {
                self.vector_query_field: {
                    "type": "bpack_vector",
                    "model_id": self.index_params.get("model_id", ""),
                }
            }

    def delete(
        self,
        ids: Optional[List[str]] = None,
        **kwargs: Any,
    ) -> Optional[bool]:
        """Delete documents from the index.

        Args:
            ids: List of ids of documents to delete
        """
        try:
            from elasticsearch.helpers import BulkIndexError, bulk
        except ImportError:
            raise ImportError(
                "Could not import elasticsearch python package. "
                "Please install it with `pip install elasticsearch`."
            )

        body = []

        if ids is None:
            raise ValueError("ids must be provided.")

        for _id in ids:
            body.append({"_op_type": "delete", "_index": self.index_name, "_id": _id})

        if len(body) > 0:
            try:
                bulk(
                    self.client,
                    body,
                    refresh=kwargs.get("refresh_indices", True),
                    ignore_status=404,
                )
                logger.debug(f"Deleted {len(body)} texts from index")
                return True
            except BulkIndexError as e:
                logger.error(f"Error deleting texts: {e}")
                raise e
        else:
            logger.info("No documents to delete")
            return False

    def _query_body(
        self,
        query_vector: Union[List[float], None],
        filter: Optional[dict] = None,
        search_params: Dict = {},
    ) -> Dict:
        query_vector_body = {"vector": query_vector, "k": search_params.get("k", 2)}

        if filter is not None and len(filter) != 0:
            query_vector_body["filter"] = filter

        if "linear" == self.index_type:
            query_vector_body["linear"] = True
        else:
            query_vector_body["ef"] = search_params.get("ef", 10)

        return {
            "size": search_params.get("size", 4),
            "query": {"knn": {self.vector_query_field: query_vector_body}},
        }

    def _search(
        self,
        query: Optional[str] = None,
        query_vector: Union[List[float], None] = None,
        filter: Optional[dict] = None,
        custom_query: Optional[Callable[[Dict, Union[str, None]], Dict]] = None,
        search_params: Dict = {},
    ) -> List[Tuple[Document, float]]:
        """Return searched documents result from BES

        Args:
            query: Text to look up documents similar to.
            query_vector: Embedding to look up documents similar to.
            filter: Array of Baidu ElasticSearch filter clauses to apply to the query.
            custom_query: Function to modify the query body before it is sent to BES.

        Returns:
            List of Documents most similar to the query and score for each
        """
        if self.embedding and query is not None:
            query_vector = self.embedding.embed_query(query)

        query_body = self._query_body(
            query_vector=query_vector, filter=filter, search_params=search_params
        )

        if custom_query is not None:
            query_body = custom_query(query_body, query)
            logger.debug(f"Calling custom_query, Query body now: {query_body}")

        logger.debug(f"Query body: {query_body}")

        # Perform the kNN search on the BES index and return the results.
        response = self.client.search(index=self.index_name, body=query_body)
        logger.debug(f"response={response}")

        hits = [hit for hit in response["hits"]["hits"]]
        docs_and_scores = [
            (
                Document(
                    page_content=hit["_source"][self.query_field],
                    metadata=hit["_source"]["metadata"],
                ),
                hit["_score"],
            )
            for hit in hits
        ]

        return docs_and_scores

    def similarity_search(
        self,
        query: str,
        k: int = 4,
        filter: Optional[dict] = None,
        **kwargs: Any,
    ) -> List[Document]:
        """Return documents most similar to query.

        Args:
            query: Text to look up documents similar to.
            k: Number of Documents to return. Defaults to 4.
            filter: Array of Elasticsearch filter clauses to apply to the query.

        Returns:
            List of Documents most similar to the query,
            in descending order of similarity.
        """
        results = self.similarity_search_with_score(
            query=query, k=k, filter=filter, **kwargs
        )
        return [doc for doc, _ in results]

    def similarity_search_with_score(
        self, query: str, k: int = 4, filter: Optional[dict] = None, **kwargs: Any
    ) -> List[Tuple[Document, float]]:
        """Return documents most similar to query, along with scores.

        Args:
            query: Text to look up documents similar to.
            k: Number of Documents to return. Defaults to 4.
            filter: Array of Elasticsearch filter clauses to apply to the query.

        Returns:
            List of Documents most similar to the query and score for each
        """
        search_params = kwargs.get("search_params") or {}

        if len(search_params) == 0 or search_params.get("size") is None:
            search_params["size"] = k
        kwargs["search_params"] = search_params

        return self._search(query=query, filter=filter, **kwargs)

    @classmethod
    def from_documents(
        cls,
        documents: List[Document],
        embedding: Optional[Embeddings] = None,
        **kwargs: Any,
    ) -> "BESVectorStore":
        """Construct BESVectorStore wrapper from documents.

        Args:
            documents: List of documents to add to the Elasticsearch index.
            embedding: Embedding function to use to embed the texts.
                Do not provide if using a strategy that doesn't require inference.
            kwargs: create index key words arguments
        """
        vectorStore = BESVectorStore._bes_vector_store(embedding=embedding, **kwargs)
        # Encode the provided texts and add them to the newly created index.
        vectorStore.add_documents(documents)

        return vectorStore

    @classmethod
    def from_texts(
        cls,
        texts: List[str],
        embedding: Optional[Embeddings] = None,
        metadatas: Optional[List[Dict[str, Any]]] = None,
        **kwargs: Any,
    ) -> "BESVectorStore":
        """Construct BESVectorStore wrapper from raw documents.

        Args:
            texts: List of texts to add to the Elasticsearch index.
            embedding: Embedding function to use to embed the texts.
            metadatas: Optional list of metadatas associated with the texts.
            index_name: Name of the Elasticsearch index to create.
            kwargs: create index key words arguments
        """
        vectorStore = BESVectorStore._bes_vector_store(embedding=embedding, **kwargs)

        # Encode the provided texts and add them to the newly created index.
        vectorStore.add_texts(texts, metadatas=metadatas, **kwargs)

        return vectorStore

    def add_texts(
        self,
        texts: Iterable[str],
        metadatas: Optional[List[Dict[Any, Any]]] = None,
        **kwargs: Any,
    ) -> List[str]:
        """Run more texts through the embeddings and add to the vectorstore.

        Args:
            texts: Iterable of strings to add to the vectorstore.
            metadatas: Optional list of metadatas associated with the texts.

        Returns:
            List of ids from adding the texts into the vectorstore.
        """
        try:
            from elasticsearch.helpers import BulkIndexError, bulk
        except ImportError:
            raise ImportError(
                "Could not import elasticsearch python package. "
                "Please install it with `pip install elasticsearch`."
            )

        embeddings = []
        create_index_if_not_exists = kwargs.get("create_index_if_not_exists", True)
        ids = kwargs.get("ids", [str(uuid.uuid4()) for _ in texts])
        refresh_indices = kwargs.get("refresh_indices", True)
        requests = []

        if self.embedding is not None:
            embeddings = self.embedding.embed_documents(list(texts))
            dims_length = len(embeddings[0])

            if create_index_if_not_exists:
                self._create_index_if_not_exists(dims_length=dims_length)

            for i, (text, vector) in enumerate(zip(texts, embeddings)):
                metadata = metadatas[i] if metadatas else {}

                requests.append(
                    {
                        "_op_type": "index",
                        "_index": self.index_name,
                        self.query_field: text,
                        self.vector_query_field: vector,
                        "metadata": metadata,
                        "_id": ids[i],
                    }
                )

        else:
            if create_index_if_not_exists:
                self._create_index_if_not_exists()

            for i, text in enumerate(texts):
                metadata = metadatas[i] if metadatas else {}

                requests.append(
                    {
                        "_op_type": "index",
                        "_index": self.index_name,
                        self.query_field: text,
                        "metadata": metadata,
                        "_id": ids[i],
                    }
                )

        if len(requests) > 0:
            try:
                success, failed = bulk(
                    self.client, requests, stats_only=True, refresh=refresh_indices
                )
                logger.debug(
                    f"Added {success} and failed to add {failed} texts to index"
                )
                logger.debug(f"added texts {ids} to index")
                return ids
            except BulkIndexError as e:
                logger.error(f"Error adding texts: {e}")
                firstError = e.errors[0].get("index", {}).get("error", {})
                logger.error(f"First error reason: {firstError.get('reason')}")
                raise e
        else:
            logger.debug("No texts to add to index")
            return []

    @staticmethod
    def _bes_vector_store(
        embedding: Optional[Embeddings] = None, **kwargs: Any
    ) -> "BESVectorStore":
        index_name = kwargs.get("index_name")
        if index_name is None:
            raise ValueError("Please provide an index_name.")

        bes_url = kwargs.get("bes_url")
        if bes_url is None:
            raise ValueError("Please provide a valid BES connection URL.")

        return BESVectorStore(embedding=embedding, **kwargs)
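

# A minimal usage sketch of the store defined above, assuming a reachable
# Baidu Elasticsearch instance. The URL, credentials, and index name are
# placeholders; the index is created on first insert from the embedding
# dimensionality.
def _example_bes_usage() -> None:
    from langchain_community.embeddings import FakeEmbeddings

    store = BESVectorStore.from_texts(
        texts=["Baidu Elasticsearch supports vector search."],
        embedding=FakeEmbeddings(size=768),  # stand-in for a real model
        index_name="langchain-demo",  # placeholder index name
        bes_url="http://localhost:9200",  # placeholder endpoint
        user="elastic",  # placeholder credentials
        password="changeme",
    )
    docs = store.similarity_search("vector search", k=1)
    print(docs)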
0
lc_public_repos/langchain/libs/community/langchain_community
lc_public_repos/langchain/libs/community/langchain_community/vectorstores/annoy.py
from __future__ import annotations

import os
import pickle
import uuid
from configparser import ConfigParser
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple

import numpy as np
from langchain_core.documents import Document
from langchain_core.embeddings import Embeddings
from langchain_core.utils import guard_import
from langchain_core.vectorstores import VectorStore

from langchain_community.docstore.base import Docstore
from langchain_community.docstore.in_memory import InMemoryDocstore
from langchain_community.vectorstores.utils import maximal_marginal_relevance

INDEX_METRICS = frozenset(["angular", "euclidean", "manhattan", "hamming", "dot"])
DEFAULT_METRIC = "angular"


def dependable_annoy_import() -> Any:
    """Import annoy if available, otherwise raise error."""
    return guard_import("annoy")


class Annoy(VectorStore):
    """`Annoy` vector store.

    To use, you should have the ``annoy`` python package installed.

    Example:
        .. code-block:: python

            from langchain_community.vectorstores import Annoy
            db = Annoy(embedding_function, index, docstore, index_to_docstore_id)

    """

    def __init__(
        self,
        embedding_function: Callable,
        index: Any,
        metric: str,
        docstore: Docstore,
        index_to_docstore_id: Dict[int, str],
    ):
        """Initialize with necessary components."""
        self.embedding_function = embedding_function
        self.index = index
        self.metric = metric
        self.docstore = docstore
        self.index_to_docstore_id = index_to_docstore_id

    @property
    def embeddings(self) -> Optional[Embeddings]:
        # TODO: Accept embedding object directly
        return None

    def add_texts(
        self,
        texts: Iterable[str],
        metadatas: Optional[List[dict]] = None,
        **kwargs: Any,
    ) -> List[str]:
        raise NotImplementedError(
            "Annoy does not allow adding new data once the index is built."
        )

    def process_index_results(
        self, idxs: List[int], dists: List[float]
    ) -> List[Tuple[Document, float]]:
        """Turns annoy results into a list of documents and scores.

        Args:
            idxs: List of indices of the documents in the index.
            dists: List of distances of the documents in the index.
        Returns:
            List of Documents and scores.
        """
        docs = []
        for idx, dist in zip(idxs, dists):
            _id = self.index_to_docstore_id[idx]
            doc = self.docstore.search(_id)
            if not isinstance(doc, Document):
                raise ValueError(f"Could not find document for id {_id}, got {doc}")
            docs.append((doc, dist))
        return docs

    def similarity_search_with_score_by_vector(
        self, embedding: List[float], k: int = 4, search_k: int = -1
    ) -> List[Tuple[Document, float]]:
        """Return docs most similar to the embedding vector.

        Args:
            embedding: Embedding to look up documents similar to.
            k: Number of Documents to return. Defaults to 4.
            search_k: inspect up to search_k nodes which defaults
                to n_trees * n if not provided

        Returns:
            List of Documents most similar to the query and score for each
        """
        idxs, dists = self.index.get_nns_by_vector(
            embedding, k, search_k=search_k, include_distances=True
        )
        return self.process_index_results(idxs, dists)

    def similarity_search_with_score_by_index(
        self, docstore_index: int, k: int = 4, search_k: int = -1
    ) -> List[Tuple[Document, float]]:
        """Return docs most similar to the document at the given docstore index.

        Args:
            docstore_index: Index of the document in the docstore.
            k: Number of Documents to return. Defaults to 4.
            search_k: inspect up to search_k nodes which defaults
                to n_trees * n if not provided

        Returns:
            List of Documents most similar to the query and score for each
        """
        idxs, dists = self.index.get_nns_by_item(
            docstore_index, k, search_k=search_k, include_distances=True
        )
        return self.process_index_results(idxs, dists)

    def similarity_search_with_score(
        self, query: str, k: int = 4, search_k: int = -1
    ) -> List[Tuple[Document, float]]:
        """Return docs most similar to query.

        Args:
            query: Text to look up documents similar to.
            k: Number of Documents to return. Defaults to 4.
            search_k: inspect up to search_k nodes which defaults
                to n_trees * n if not provided

        Returns:
            List of Documents most similar to the query and score for each
        """
        embedding = self.embedding_function(query)
        docs = self.similarity_search_with_score_by_vector(embedding, k, search_k)
        return docs

    def similarity_search_by_vector(
        self, embedding: List[float], k: int = 4, search_k: int = -1, **kwargs: Any
    ) -> List[Document]:
        """Return docs most similar to embedding vector.

        Args:
            embedding: Embedding to look up documents similar to.
            k: Number of Documents to return. Defaults to 4.
            search_k: inspect up to search_k nodes which defaults
                to n_trees * n if not provided

        Returns:
            List of Documents most similar to the embedding.
        """
        docs_and_scores = self.similarity_search_with_score_by_vector(
            embedding, k, search_k
        )
        return [doc for doc, _ in docs_and_scores]

    def similarity_search_by_index(
        self, docstore_index: int, k: int = 4, search_k: int = -1, **kwargs: Any
    ) -> List[Document]:
        """Return docs most similar to docstore_index.

        Args:
            docstore_index: Index of document in docstore
            k: Number of Documents to return. Defaults to 4.
            search_k: inspect up to search_k nodes which defaults
                to n_trees * n if not provided

        Returns:
            List of Documents most similar to the embedding.
        """
        docs_and_scores = self.similarity_search_with_score_by_index(
            docstore_index, k, search_k
        )
        return [doc for doc, _ in docs_and_scores]

    def similarity_search(
        self, query: str, k: int = 4, search_k: int = -1, **kwargs: Any
    ) -> List[Document]:
        """Return docs most similar to query.

        Args:
            query: Text to look up documents similar to.
            k: Number of Documents to return. Defaults to 4.
            search_k: inspect up to search_k nodes which defaults
                to n_trees * n if not provided

        Returns:
            List of Documents most similar to the query.
        """
        docs_and_scores = self.similarity_search_with_score(query, k, search_k)
        return [doc for doc, _ in docs_and_scores]

    def max_marginal_relevance_search_by_vector(
        self,
        embedding: List[float],
        k: int = 4,
        fetch_k: int = 20,
        lambda_mult: float = 0.5,
        **kwargs: Any,
    ) -> List[Document]:
        """Return docs selected using the maximal marginal relevance.

        Maximal marginal relevance optimizes for similarity to query AND
        diversity among selected documents.

        Args:
            embedding: Embedding to look up documents similar to.
            fetch_k: Number of Documents to fetch to pass to MMR algorithm.
            k: Number of Documents to return. Defaults to 4.
            lambda_mult: Number between 0 and 1 that determines the degree
                of diversity among the results with 0 corresponding
                to maximum diversity and 1 to minimum diversity.
                Defaults to 0.5.
        Returns:
            List of Documents selected by maximal marginal relevance.
        """
        idxs = self.index.get_nns_by_vector(
            embedding, fetch_k, search_k=-1, include_distances=False
        )
        embeddings = [self.index.get_item_vector(i) for i in idxs]
        mmr_selected = maximal_marginal_relevance(
            np.array([embedding], dtype=np.float32),
            embeddings,
            k=k,
            lambda_mult=lambda_mult,
        )
        # ignore the -1's if not enough docs are returned/indexed
        selected_indices = [idxs[i] for i in mmr_selected if i != -1]

        docs = []
        for i in selected_indices:
            _id = self.index_to_docstore_id[i]
            doc = self.docstore.search(_id)
            if not isinstance(doc, Document):
                raise ValueError(f"Could not find document for id {_id}, got {doc}")
            docs.append(doc)
        return docs

    def max_marginal_relevance_search(
        self,
        query: str,
        k: int = 4,
        fetch_k: int = 20,
        lambda_mult: float = 0.5,
        **kwargs: Any,
    ) -> List[Document]:
        """Return docs selected using the maximal marginal relevance.

        Maximal marginal relevance optimizes for similarity to query AND
        diversity among selected documents.

        Args:
            query: Text to look up documents similar to.
            k: Number of Documents to return. Defaults to 4.
            fetch_k: Number of Documents to fetch to pass to MMR algorithm.
            lambda_mult: Number between 0 and 1 that determines the degree
                of diversity among the results with 0 corresponding
                to maximum diversity and 1 to minimum diversity.
                Defaults to 0.5.
        Returns:
            List of Documents selected by maximal marginal relevance.
        """
        embedding = self.embedding_function(query)
        docs = self.max_marginal_relevance_search_by_vector(
            embedding, k, fetch_k, lambda_mult=lambda_mult
        )
        return docs

    @classmethod
    def __from(
        cls,
        texts: List[str],
        embeddings: List[List[float]],
        embedding: Embeddings,
        metadatas: Optional[List[dict]] = None,
        metric: str = DEFAULT_METRIC,
        trees: int = 100,
        n_jobs: int = -1,
        **kwargs: Any,
    ) -> Annoy:
        if metric not in INDEX_METRICS:
            raise ValueError(
                (
                    f"Unsupported distance metric: {metric}. "
                    f"Expected one of {list(INDEX_METRICS)}"
                )
            )
        annoy = guard_import("annoy")
        if not embeddings:
            raise ValueError("embeddings must be provided to build AnnoyIndex")
        f = len(embeddings[0])
        index = annoy.AnnoyIndex(f, metric=metric)
        for i, emb in enumerate(embeddings):
            index.add_item(i, emb)
        index.build(trees, n_jobs=n_jobs)

        documents = []
        for i, text in enumerate(texts):
            metadata = metadatas[i] if metadatas else {}
            documents.append(Document(page_content=text, metadata=metadata))
        index_to_id = {i: str(uuid.uuid4()) for i in range(len(documents))}

        docstore = InMemoryDocstore(
            {index_to_id[i]: doc for i, doc in enumerate(documents)}
        )
        return cls(embedding.embed_query, index, metric, docstore, index_to_id)

    @classmethod
    def from_texts(
        cls,
        texts: List[str],
        embedding: Embeddings,
        metadatas: Optional[List[dict]] = None,
        metric: str = DEFAULT_METRIC,
        trees: int = 100,
        n_jobs: int = -1,
        **kwargs: Any,
    ) -> Annoy:
        """Construct Annoy wrapper from raw documents.

        Args:
            texts: List of documents to index.
            embedding: Embedding function to use.
            metadatas: List of metadata dictionaries to associate with documents.
            metric: Metric to use for indexing. Defaults to "angular".
            trees: Number of trees to use for indexing. Defaults to 100.
            n_jobs: Number of jobs to use for indexing. Defaults to -1.

        This is a user-friendly interface that:
            1. Embeds documents.
            2. Creates an in-memory docstore.
            3. Initializes the Annoy database.

        This is intended to be a quick way to get started.

        Example:
            ..
code-block:: python from langchain_community.vectorstores import Annoy from langchain_community.embeddings import OpenAIEmbeddings embeddings = OpenAIEmbeddings() index = Annoy.from_texts(texts, embeddings) """ embeddings = embedding.embed_documents(texts) return cls.__from( texts, embeddings, embedding, metadatas, metric, trees, n_jobs, **kwargs ) @classmethod def from_embeddings( cls, text_embeddings: List[Tuple[str, List[float]]], embedding: Embeddings, metadatas: Optional[List[dict]] = None, metric: str = DEFAULT_METRIC, trees: int = 100, n_jobs: int = -1, **kwargs: Any, ) -> Annoy: """Construct Annoy wrapper from embeddings. Args: text_embeddings: List of tuples of (text, embedding) embedding: Embedding function to use. metadatas: List of metadata dictionaries to associate with documents. metric: Metric to use for indexing. Defaults to "angular". trees: Number of trees to use for indexing. Defaults to 100. n_jobs: Number of jobs to use for indexing. Defaults to -1. This is a user friendly interface that: 1. Creates an in memory docstore with provided embeddings 2. Initializes the Annoy database This is intended to be a quick way to get started. Example: .. code-block:: python from langchain_community.vectorstores import Annoy from langchain_community.embeddings import OpenAIEmbeddings embeddings = OpenAIEmbeddings() text_embeddings = embeddings.embed_documents(texts) text_embedding_pairs = list(zip(texts, text_embeddings)) db = Annoy.from_embeddings(text_embedding_pairs, embeddings) """ texts = [t[0] for t in text_embeddings] embeddings = [t[1] for t in text_embeddings] return cls.__from( texts, embeddings, embedding, metadatas, metric, trees, n_jobs, **kwargs ) def save_local(self, folder_path: str, prefault: bool = False) -> None: """Save Annoy index, docstore, and index_to_docstore_id to disk. Args: folder_path: folder path to save index, docstore, and index_to_docstore_id to. prefault: Whether to pre-load the index into memory. """ path = Path(folder_path) os.makedirs(path, exist_ok=True) # save index, index config, docstore and index_to_docstore_id config_object = ConfigParser() config_object["ANNOY"] = { "f": self.index.f, "metric": self.metric, } self.index.save(str(path / "index.annoy"), prefault=prefault) with open(path / "index.pkl", "wb") as file: pickle.dump((self.docstore, self.index_to_docstore_id, config_object), file) @classmethod def load_local( cls, folder_path: str, embeddings: Embeddings, *, allow_dangerous_deserialization: bool = False, ) -> Annoy: """Load Annoy index, docstore, and index_to_docstore_id from disk. Args: folder_path: folder path to load index, docstore, and index_to_docstore_id from. embeddings: Embeddings to use when generating queries. allow_dangerous_deserialization: whether to allow deserialization of the data which involves loading a pickle file. Pickle files can be modified by malicious actors to deliver a malicious payload that results in execution of arbitrary code on your machine. """ if not allow_dangerous_deserialization: raise ValueError( "The de-serialization relies on loading a pickle file. " "Pickle files can be modified to deliver a malicious payload that " "results in execution of arbitrary code on your machine. " "You will need to set `allow_dangerous_deserialization` to `True` to " "enable deserialization. If you do this, make sure that you " "trust the source of the data. For example, if you are loading a " "file that you created, and know that no one else has modified the " "file, then this is safe to do. 
Do not set this to `True` if you are " "loading a file from an untrusted source (e.g., some random site on " "the internet)." ) path = Path(folder_path) # load index separately since it is not picklable annoy = guard_import("annoy") # load docstore and index_to_docstore_id with open(path / "index.pkl", "rb") as file: # Code path can only be reached if allow_dangerous_deserialization is True ( docstore, index_to_docstore_id, config_object, ) = pickle.load( # ignore[pickle]: explicit-opt-in file ) f = int(config_object["ANNOY"]["f"]) metric = config_object["ANNOY"]["metric"] index = annoy.AnnoyIndex(f, metric=metric) index.load(str(path / "index.annoy")) return cls( embeddings.embed_query, index, metric, docstore, index_to_docstore_id )
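# --- Usage sketch (illustrative; not part of the source file above) ---
# A round trip through the Annoy store above: build from texts, query,
# persist, and reload. FakeEmbeddings is a stand-in for a real embedding
# model; reloading requires the explicit pickle opt-in flag shown below.
from langchain_community.embeddings import FakeEmbeddings
from langchain_community.vectorstores import Annoy

embeddings = FakeEmbeddings(size=16)
db = Annoy.from_texts(["alpha", "beta", "gamma"], embeddings, metric="angular")
print(db.similarity_search("alpha", k=2))

db.save_local("annoy_index")
# load_local deserializes a pickle file, hence the explicit opt-in.
restored = Annoy.load_local(
    "annoy_index", embeddings, allow_dangerous_deserialization=True
)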
0
lc_public_repos/langchain/libs/community/langchain_community
lc_public_repos/langchain/libs/community/langchain_community/vectorstores/tair.py
from __future__ import annotations import json import logging import uuid from typing import Any, Iterable, List, Optional, Type from langchain_core.documents import Document from langchain_core.embeddings import Embeddings from langchain_core.utils import get_from_dict_or_env from langchain_core.vectorstores import VectorStore logger = logging.getLogger(__name__) def _uuid_key() -> str: return uuid.uuid4().hex class Tair(VectorStore): """`Tair` vector store.""" def __init__( self, embedding_function: Embeddings, url: str, index_name: str, content_key: str = "content", metadata_key: str = "metadata", search_params: Optional[dict] = None, **kwargs: Any, ): self.embedding_function = embedding_function self.index_name = index_name try: from tair import Tair as TairClient except ImportError: raise ImportError( "Could not import tair python package. " "Please install it with `pip install tair`." ) try: # connect to tair from url client = TairClient.from_url(url, **kwargs) except ValueError as e: raise ValueError(f"Tair failed to connect: {e}") self.client = client self.content_key = content_key self.metadata_key = metadata_key self.search_params = search_params @property def embeddings(self) -> Embeddings: return self.embedding_function def create_index_if_not_exist( self, dim: int, distance_type: str, index_type: str, data_type: str, **kwargs: Any, ) -> bool: index = self.client.tvs_get_index(self.index_name) if index is not None: logger.info("Index already exists") return False self.client.tvs_create_index( self.index_name, dim, distance_type, index_type, data_type, **kwargs, ) return True def add_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, **kwargs: Any, ) -> List[str]: """Add texts data to an existing index.""" ids = [] keys = kwargs.get("keys", None) use_hybrid_search = False index = self.client.tvs_get_index(self.index_name) if index is not None and index.get("lexical_algorithm") == "bm25": use_hybrid_search = True # Write data to tair pipeline = self.client.pipeline(transaction=False) embeddings = self.embedding_function.embed_documents(list(texts)) for i, text in enumerate(texts): # Use provided key otherwise use default key key = keys[i] if keys else _uuid_key() metadata = metadatas[i] if metadatas else {} if use_hybrid_search: # tair use TEXT attr hybrid search pipeline.tvs_hset( self.index_name, key, embeddings[i], False, **{ "TEXT": text, self.content_key: text, self.metadata_key: json.dumps(metadata), }, ) else: pipeline.tvs_hset( self.index_name, key, embeddings[i], False, **{ self.content_key: text, self.metadata_key: json.dumps(metadata), }, ) ids.append(key) pipeline.execute() return ids def similarity_search( self, query: str, k: int = 4, **kwargs: Any ) -> List[Document]: """ Returns the most similar indexed documents to the query text. Args: query (str): The query text for which to find similar documents. k (int): The number of documents to return. Default is 4. Returns: List[Document]: A list of documents that are most similar to the query text. 
""" # Creates embedding vector from user query embedding = self.embedding_function.embed_query(query) keys_and_scores = self.client.tvs_knnsearch( self.index_name, k, embedding, False, None, **kwargs ) pipeline = self.client.pipeline(transaction=False) for key, _ in keys_and_scores: pipeline.tvs_hmget( self.index_name, key, self.metadata_key, self.content_key ) docs = pipeline.execute() return [ Document( page_content=d[1], metadata=json.loads(d[0]), ) for d in docs ] @classmethod def from_texts( cls: Type[Tair], texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, index_name: str = "langchain", content_key: str = "content", metadata_key: str = "metadata", **kwargs: Any, ) -> Tair: try: from tair import tairvector except ImportError: raise ImportError( "Could not import tair python package. " "Please install it with `pip install tair`." ) url = get_from_dict_or_env(kwargs, "tair_url", "TAIR_URL") if "tair_url" in kwargs: kwargs.pop("tair_url") distance_type = tairvector.DistanceMetric.InnerProduct if "distance_type" in kwargs: distance_type = kwargs.pop("distance_type") index_type = tairvector.IndexType.HNSW if "index_type" in kwargs: index_type = kwargs.pop("index_type") data_type = tairvector.DataType.Float32 if "data_type" in kwargs: data_type = kwargs.pop("data_type") index_params = {} if "index_params" in kwargs: index_params = kwargs.pop("index_params") search_params = {} if "search_params" in kwargs: search_params = kwargs.pop("search_params") keys = None if "keys" in kwargs: keys = kwargs.pop("keys") try: tair_vector_store = cls( embedding, url, index_name, content_key=content_key, metadata_key=metadata_key, search_params=search_params, **kwargs, ) except ValueError as e: raise ValueError(f"tair failed to connect: {e}") # Create embeddings for documents embeddings = embedding.embed_documents(texts) tair_vector_store.create_index_if_not_exist( len(embeddings[0]), distance_type, index_type, data_type, **index_params, ) tair_vector_store.add_texts(texts, metadatas, keys=keys) return tair_vector_store @classmethod def from_documents( cls, documents: List[Document], embedding: Embeddings, metadatas: Optional[List[dict]] = None, index_name: str = "langchain", content_key: str = "content", metadata_key: str = "metadata", **kwargs: Any, ) -> Tair: texts = [d.page_content for d in documents] metadatas = [d.metadata for d in documents] return cls.from_texts( texts, embedding, metadatas, index_name, content_key, metadata_key, **kwargs ) @staticmethod def drop_index( index_name: str = "langchain", **kwargs: Any, ) -> bool: """ Drop an existing index. Args: index_name (str): Name of the index to drop. Returns: bool: True if the index is dropped successfully. """ try: from tair import Tair as TairClient except ImportError: raise ImportError( "Could not import tair python package. " "Please install it with `pip install tair`." 
) url = get_from_dict_or_env(kwargs, "tair_url", "TAIR_URL") try: if "tair_url" in kwargs: kwargs.pop("tair_url") client = TairClient.from_url(url=url, **kwargs) except ValueError as e: raise ValueError(f"Tair connection error: {e}") # delete index ret = client.tvs_del_index(index_name) if ret == 0: # index not exist logger.info("Index does not exist") return False return True @classmethod def from_existing_index( cls, embedding: Embeddings, index_name: str = "langchain", content_key: str = "content", metadata_key: str = "metadata", **kwargs: Any, ) -> Tair: """Connect to an existing Tair index.""" url = get_from_dict_or_env(kwargs, "tair_url", "TAIR_URL") search_params = {} if "search_params" in kwargs: search_params = kwargs.pop("search_params") return cls( embedding, url, index_name, content_key=content_key, metadata_key=metadata_key, search_params=search_params, **kwargs, )
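# --- Usage sketch (illustrative; not part of the source file above) ---
# Building a Tair index from texts and querying it with the methods above.
# The connection URL and index name are placeholders; a running Tair instance
# with the vector module is assumed, and FakeEmbeddings stands in for a real
# embedding model.
from langchain_community.embeddings import FakeEmbeddings
from langchain_community.vectorstores import Tair

db = Tair.from_texts(
    ["doc one", "doc two"],
    FakeEmbeddings(size=16),
    index_name="langchain-demo",
    tair_url="redis://localhost:6379",  # or set the TAIR_URL env var
)
print(db.similarity_search("doc one", k=1))
# Clean up; drop_index returns False if the index did not exist.
Tair.drop_index(index_name="langchain-demo", tair_url="redis://localhost:6379")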
0
lc_public_repos/langchain/libs/community/langchain_community
lc_public_repos/langchain/libs/community/langchain_community/vectorstores/bigquery_vector_search.py
"""Vector Store in Google Cloud BigQuery.""" from __future__ import annotations import asyncio import json import logging import sys import uuid from datetime import datetime from functools import partial from threading import Lock, Thread from typing import Any, Callable, Dict, List, Optional, Tuple, Type import numpy as np from langchain_core._api.deprecation import deprecated from langchain_core.documents import Document from langchain_core.embeddings import Embeddings from langchain_core.vectorstores import VectorStore from langchain_community.utils.google import get_client_info from langchain_community.vectorstores.utils import ( DistanceStrategy, maximal_marginal_relevance, ) DEFAULT_DISTANCE_STRATEGY = DistanceStrategy.EUCLIDEAN_DISTANCE DEFAULT_DOC_ID_COLUMN_NAME = "doc_id" # document id DEFAULT_TEXT_EMBEDDING_COLUMN_NAME = "text_embedding" # embeddings vectors DEFAULT_METADATA_COLUMN_NAME = "metadata" # document metadata DEFAULT_CONTENT_COLUMN_NAME = "content" # text content, do not rename DEFAULT_TOP_K = 4 # default number of documents returned from similarity search _MIN_INDEX_ROWS = 5000 # minimal number of rows for creating an index _INDEX_CHECK_PERIOD_SECONDS = 60 # Do not check for index more often that this. _vector_table_lock = Lock() # process-wide BigQueryVectorSearch table lock @deprecated( since="0.0.33", removal="1.0", alternative_import="langchain_google_community.BigQueryVectorSearch", ) class BigQueryVectorSearch(VectorStore): """Google Cloud BigQuery vector store. To use, you need the following packages installed: google-cloud-bigquery """ def __init__( self, embedding: Embeddings, project_id: str, dataset_name: str, table_name: str, location: str = "US", content_field: str = DEFAULT_CONTENT_COLUMN_NAME, metadata_field: str = DEFAULT_METADATA_COLUMN_NAME, text_embedding_field: str = DEFAULT_TEXT_EMBEDDING_COLUMN_NAME, doc_id_field: str = DEFAULT_DOC_ID_COLUMN_NAME, distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY, credentials: Optional[Any] = None, ): """Constructor for BigQueryVectorSearch. Args: embedding (Embeddings): Text Embedding model to use. project_id (str): GCP project. dataset_name (str): BigQuery dataset to store documents and embeddings. table_name (str): BigQuery table name. location (str, optional): BigQuery region. Defaults to `US`(multi-region). content_field (str): Specifies the column to store the content. Defaults to `content`. metadata_field (str): Specifies the column to store the metadata. Defaults to `metadata`. text_embedding_field (str): Specifies the column to store the embeddings vector. Defaults to `text_embedding`. doc_id_field (str): Specifies the column to store the document id. Defaults to `doc_id`. distance_strategy (DistanceStrategy, optional): Determines the strategy employed for calculating the distance between vectors in the embedding space. Defaults to EUCLIDEAN_DISTANCE. Available options are: - COSINE: Measures the similarity between two vectors of an inner product space. - EUCLIDEAN_DISTANCE: Computes the Euclidean distance between two vectors. This metric considers the geometric distance in the vector space, and might be more suitable for embeddings that rely on spatial relationships. This is the default behavior credentials (Credentials, optional): Custom Google Cloud credentials to use. Defaults to None. 
""" try: from google.cloud import bigquery client_info = get_client_info(module="bigquery-vector-search") self.bq_client = bigquery.Client( project=project_id, location=location, credentials=credentials, client_info=client_info, ) except ModuleNotFoundError: raise ImportError( "Please, install or upgrade the google-cloud-bigquery library: " "pip install google-cloud-bigquery" ) self._logger = logging.getLogger(__name__) self._creating_index = False self._have_index = False self.embedding_model = embedding self.project_id = project_id self.dataset_name = dataset_name self.table_name = table_name self.location = location self.content_field = content_field self.metadata_field = metadata_field self.text_embedding_field = text_embedding_field self.doc_id_field = doc_id_field self.distance_strategy = distance_strategy self._full_table_id = ( f"{self.project_id}." f"{self.dataset_name}." f"{self.table_name}" ) self._logger.debug("Using table `%s`", self.full_table_id) with _vector_table_lock: self.vectors_table = self._initialize_table() self._last_index_check = datetime.min self._initialize_vector_index() def _initialize_table(self) -> Any: """Validates or creates the BigQuery table.""" from google.cloud import bigquery table_ref = bigquery.TableReference.from_string(self._full_table_id) table = self.bq_client.create_table(table_ref, exists_ok=True) changed_schema = False schema = table.schema.copy() columns = {c.name: c for c in schema} if self.doc_id_field not in columns: changed_schema = True schema.append( bigquery.SchemaField(name=self.doc_id_field, field_type="STRING") ) elif ( columns[self.doc_id_field].field_type != "STRING" or columns[self.doc_id_field].mode == "REPEATED" ): raise ValueError(f"Column {self.doc_id_field} must be of " "STRING type") if self.metadata_field not in columns: changed_schema = True schema.append( bigquery.SchemaField(name=self.metadata_field, field_type="JSON") ) elif ( columns[self.metadata_field].field_type not in ["JSON", "STRING"] or columns[self.metadata_field].mode == "REPEATED" ): raise ValueError( f"Column {self.metadata_field} must be of STRING or JSON type" ) if self.content_field not in columns: changed_schema = True schema.append( bigquery.SchemaField(name=self.content_field, field_type="STRING") ) elif ( columns[self.content_field].field_type != "STRING" or columns[self.content_field].mode == "REPEATED" ): raise ValueError(f"Column {self.content_field} must be of " "STRING type") if self.text_embedding_field not in columns: changed_schema = True schema.append( bigquery.SchemaField( name=self.text_embedding_field, field_type="FLOAT64", mode="REPEATED", ) ) elif ( columns[self.text_embedding_field].field_type not in ("FLOAT", "FLOAT64") or columns[self.text_embedding_field].mode != "REPEATED" ): raise ValueError( f"Column {self.text_embedding_field} must be of " "ARRAY<FLOAT64> type" ) if changed_schema: self._logger.debug("Updated table `%s` schema.", self.full_table_id) table.schema = schema table = self.bq_client.update_table(table, fields=["schema"]) return table def _initialize_vector_index(self) -> Any: """ A vector index in BigQuery table enables efficient approximate vector search. """ from google.cloud import bigquery if self._have_index or self._creating_index: # Already have an index or in the process of creating one. return table = self.bq_client.get_table(self.vectors_table) if (table.num_rows or 0) < _MIN_INDEX_ROWS: # Not enough rows to create index. 
self._logger.debug("Not enough rows to create a vector index.") return if ( datetime.utcnow() - self._last_index_check ).total_seconds() < _INDEX_CHECK_PERIOD_SECONDS: return with _vector_table_lock: if self._creating_index or self._have_index: return self._last_index_check = datetime.utcnow() # Check if index exists, create if necessary check_query = ( f"SELECT 1 FROM `{self.project_id}.{self.dataset_name}" ".INFORMATION_SCHEMA.VECTOR_INDEXES` WHERE" f" table_name = '{self.table_name}'" ) job = self.bq_client.query( check_query, api_method=bigquery.enums.QueryApiMethod.QUERY ) if job.result().total_rows == 0: # Need to create an index. Make it in a separate thread. self._create_index_in_background() else: self._logger.debug("Vector index already exists.") self._have_index = True def _create_index_in_background(self): # type: ignore[no-untyped-def] if self._have_index or self._creating_index: # Already have an index or in the process of creating one. return self._creating_index = True self._logger.debug("Trying to create a vector index.") thread = Thread(target=self._create_index, daemon=True) thread.start() def _create_index(self): # type: ignore[no-untyped-def] from google.api_core.exceptions import ClientError table = self.bq_client.get_table(self.vectors_table) if (table.num_rows or 0) < _MIN_INDEX_ROWS: # Not enough rows to create index. return if self.distance_strategy == DistanceStrategy.EUCLIDEAN_DISTANCE: distance_type = "EUCLIDEAN" elif self.distance_strategy == DistanceStrategy.COSINE: distance_type = "COSINE" # Default to EUCLIDEAN_DISTANCE else: distance_type = "EUCLIDEAN" index_name = f"{self.table_name}_langchain_index" try: sql = f""" CREATE VECTOR INDEX IF NOT EXISTS `{index_name}` ON `{self.full_table_id}`({self.text_embedding_field}) OPTIONS(distance_type="{distance_type}", index_type="IVF") """ self.bq_client.query(sql).result() self._have_index = True except ClientError as ex: self._logger.debug("Vector index creation failed (%s).", ex.args[0]) finally: self._creating_index = False def _persist(self, data: Dict[str, Any]) -> None: """Saves documents and embeddings to BigQuery.""" from google.cloud import bigquery data_len = len(data[list(data.keys())[0]]) if data_len == 0: return list_of_dicts = [dict(zip(data, t)) for t in zip(*data.values())] job_config = bigquery.LoadJobConfig() job_config.schema = self.vectors_table.schema job_config.schema_update_options = ( bigquery.SchemaUpdateOption.ALLOW_FIELD_ADDITION ) job_config.write_disposition = bigquery.WriteDisposition.WRITE_APPEND job = self.bq_client.load_table_from_json( list_of_dicts, self.vectors_table, job_config=job_config ) job.result() @property def embeddings(self) -> Optional[Embeddings]: return self.embedding_model @property def full_table_id(self) -> str: return self._full_table_id def add_texts( # type: ignore[override] self, texts: List[str], metadatas: Optional[List[dict]] = None, **kwargs: Any, ) -> List[str]: """Run more texts through the embeddings and add to the vectorstore. Args: texts: List of strings to add to the vectorstore. metadatas: Optional list of metadata associated with the texts. Returns: List of ids from adding the texts into the vectorstore. """ embs = self.embedding_model.embed_documents(texts) return self.add_texts_with_embeddings(texts, embs, metadatas, **kwargs) def add_texts_with_embeddings( self, texts: List[str], embs: List[List[float]], metadatas: Optional[List[dict]] = None, **kwargs: Any, ) -> List[str]: """Run more texts through the embeddings and add to the vectorstore. 
Args: texts: List of strings to add to the vectorstore. embs: List of lists of floats with text embeddings for texts. metadatas: Optional list of metadata associated with the texts. Returns: List of ids from adding the texts into the vectorstore. """ ids = [uuid.uuid4().hex for _ in texts] values_dict: Dict[str, List[Any]] = { self.content_field: texts, self.doc_id_field: ids, } if not metadatas: metadatas = [] len_diff = len(ids) - len(metadatas) add_meta = [None for _ in range(0, len_diff)] metadatas = [m if m is not None else {} for m in metadatas + add_meta] values_dict[self.metadata_field] = metadatas values_dict[self.text_embedding_field] = embs self._persist(values_dict) return ids def get_documents( self, ids: Optional[List[str]] = None, filter: Optional[Dict[str, Any]] = None ) -> List[Document]: """Search documents by their ids or metadata values. Args: ids: List of ids of documents to retrieve from the vectorstore. filter: Filter on metadata properties, e.g. { "str_property": "foo", "int_property": 123 } Returns: List of Documents matching the provided ids and/or metadata filter. """ if ids and len(ids) > 0: from google.cloud import bigquery job_config = bigquery.QueryJobConfig( query_parameters=[ bigquery.ArrayQueryParameter("ids", "STRING", ids), ] ) id_expr = f"{self.doc_id_field} IN UNNEST(@ids)" else: job_config = None id_expr = "TRUE" if filter: filter_expressions = [] for i in filter.items(): if isinstance(i[1], float): expr = ( "ABS(CAST(JSON_VALUE(" f"`{self.metadata_field}`,'$.{i[0]}') " f"AS FLOAT64) - {i[1]}) " f"<= {sys.float_info.epsilon}" ) else: val = str(i[1]).replace('"', '\\"') expr = ( f"JSON_VALUE(`{self.metadata_field}`,'$.{i[0]}')" f' = "{val}"' ) filter_expressions.append(expr) filter_expression_str = " AND ".join(filter_expressions) where_filter_expr = f" AND ({filter_expression_str})" else: where_filter_expr = "" job = self.bq_client.query( f""" SELECT * FROM `{self.full_table_id}` WHERE {id_expr} {where_filter_expr} """, job_config=job_config, ) docs: List[Document] = [] for row in job: metadata = None if self.metadata_field: metadata = row[self.metadata_field] if metadata: if not isinstance(metadata, dict): metadata = json.loads(metadata) else: metadata = {} metadata["__id"] = row[self.doc_id_field] doc = Document(page_content=row[self.content_field], metadata=metadata) docs.append(doc) return docs def delete(self, ids: Optional[List[str]] = None, **kwargs: Any) -> Optional[bool]: """Delete by vector ID or other criteria. Args: ids: List of ids to delete. **kwargs: Other keyword arguments that subclasses might use. Returns: Optional[bool]: True if deletion is successful, False otherwise, None if not implemented. """ if not ids or len(ids) == 0: return True from google.cloud import bigquery job_config = bigquery.QueryJobConfig( query_parameters=[ bigquery.ArrayQueryParameter("ids", "STRING", ids), ] ) self.bq_client.query( f""" DELETE FROM `{self.full_table_id}` WHERE {self.doc_id_field} IN UNNEST(@ids) """, job_config=job_config, ).result() return True async def adelete( self, ids: Optional[List[str]] = None, **kwargs: Any ) -> Optional[bool]: """Delete by vector ID or other criteria. Args: ids: List of ids to delete. **kwargs: Other keyword arguments that subclasses might use. Returns: Optional[bool]: True if deletion is successful, False otherwise, None if not implemented. 
""" return await asyncio.get_running_loop().run_in_executor( None, partial(self.delete, **kwargs), ids ) def _search_with_score_and_embeddings_by_vector( self, embedding: List[float], k: int = DEFAULT_TOP_K, filter: Optional[Dict[str, Any]] = None, brute_force: bool = False, fraction_lists_to_search: Optional[float] = None, ) -> List[Tuple[Document, List[float], float]]: from google.cloud import bigquery # Create an index if no index exists. if not self._have_index and not self._creating_index: self._initialize_vector_index() # Prepare filter filter_expr = "TRUE" if filter: filter_expressions = [] for i in filter.items(): if isinstance(i[1], float): expr = ( "ABS(CAST(JSON_VALUE(" f"base.`{self.metadata_field}`,'$.{i[0]}') " f"AS FLOAT64) - {i[1]}) " f"<= {sys.float_info.epsilon}" ) else: val = str(i[1]).replace('"', '\\"') expr = ( f"JSON_VALUE(base.`{self.metadata_field}`,'$.{i[0]}')" f' = "{val}"' ) filter_expressions.append(expr) filter_expression_str = " AND ".join(filter_expressions) filter_expr += f" AND ({filter_expression_str})" # Configure and run a query job. job_config = bigquery.QueryJobConfig( query_parameters=[ bigquery.ArrayQueryParameter("v", "FLOAT64", embedding), ], use_query_cache=False, priority=bigquery.QueryPriority.BATCH, ) if self.distance_strategy == DistanceStrategy.EUCLIDEAN_DISTANCE: distance_type = "EUCLIDEAN" elif self.distance_strategy == DistanceStrategy.COSINE: distance_type = "COSINE" # Default to EUCLIDEAN_DISTANCE else: distance_type = "EUCLIDEAN" if brute_force: options_string = ",options => '{\"use_brute_force\":true}'" elif fraction_lists_to_search: if fraction_lists_to_search == 0 or fraction_lists_to_search >= 1.0: raise ValueError( "`fraction_lists_to_search` must be between " "0.0 and 1.0" ) options_string = ( ',options => \'{"fraction_lists_to_search":' f"{fraction_lists_to_search}}}'" ) else: options_string = "" query = f""" SELECT base.*, distance AS _vector_search_distance FROM VECTOR_SEARCH( TABLE `{self.full_table_id}`, "{self.text_embedding_field}", (SELECT @v AS {self.text_embedding_field}), distance_type => "{distance_type}", top_k => {k} {options_string} ) WHERE {filter_expr} LIMIT {k} """ document_tuples: List[Tuple[Document, List[float], float]] = [] # TODO(vladkol): Use jobCreationMode=JOB_CREATION_OPTIONAL when available. job = self.bq_client.query( query, job_config=job_config, api_method=bigquery.enums.QueryApiMethod.QUERY ) # Process job results. for row in job: metadata = row[self.metadata_field] if metadata: if not isinstance(metadata, dict): metadata = json.loads(metadata) else: metadata = {} metadata["__id"] = row[self.doc_id_field] metadata["__job_id"] = job.job_id doc = Document(page_content=row[self.content_field], metadata=metadata) document_tuples.append( (doc, row[self.text_embedding_field], row["_vector_search_distance"]) ) return document_tuples def similarity_search_with_score_by_vector( self, embedding: List[float], k: int = DEFAULT_TOP_K, filter: Optional[Dict[str, Any]] = None, brute_force: bool = False, fraction_lists_to_search: Optional[float] = None, **kwargs: Any, ) -> List[Tuple[Document, float]]: """Return docs most similar to embedding vector. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. filter: Filter on metadata properties, e.g. { "str_property": "foo", "int_property": 123 } brute_force: Whether to use brute force search. Defaults to False. 
fraction_lists_to_search: Optional percentage of lists to search, must be between 0.0 and 1.0, exclusive. If None, uses the service's default, which is 0.05. Returns: List of Documents most similar to the query vector with distance. """ del kwargs document_tuples = self._search_with_score_and_embeddings_by_vector( embedding, k, filter, brute_force, fraction_lists_to_search ) return [(doc, distance) for doc, _, distance in document_tuples] def similarity_search_by_vector( self, embedding: List[float], k: int = DEFAULT_TOP_K, filter: Optional[Dict[str, Any]] = None, brute_force: bool = False, fraction_lists_to_search: Optional[float] = None, **kwargs: Any, ) -> List[Document]: """Return docs most similar to embedding vector. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. filter: Filter on metadata properties, e.g. { "str_property": "foo", "int_property": 123 } brute_force: Whether to use brute force search. Defaults to False. fraction_lists_to_search: Optional percentage of lists to search, must be between 0.0 and 1.0, exclusive. If None, uses the service's default, which is 0.05. Returns: List of Documents most similar to the query vector. """ tuples = self.similarity_search_with_score_by_vector( embedding, k, filter, brute_force, fraction_lists_to_search, **kwargs ) return [i[0] for i in tuples] def similarity_search_with_score( self, query: str, k: int = DEFAULT_TOP_K, filter: Optional[Dict[str, Any]] = None, brute_force: bool = False, fraction_lists_to_search: Optional[float] = None, **kwargs: Any, ) -> List[Tuple[Document, float]]: """Run similarity search with score. Args: query: search query text. k: Number of Documents to return. Defaults to 4. filter: Filter on metadata properties, e.g. { "str_property": "foo", "int_property": 123 } brute_force: Whether to use brute force search. Defaults to False. fraction_lists_to_search: Optional percentage of lists to search, must be between 0.0 and 1.0, exclusive. If None, uses the service's default, which is 0.05. Returns: List of Documents most similar to the query vector, with similarity scores. """ emb = self.embedding_model.embed_query(query) # type: ignore return self.similarity_search_with_score_by_vector( emb, k, filter, brute_force, fraction_lists_to_search, **kwargs ) def similarity_search( self, query: str, k: int = DEFAULT_TOP_K, filter: Optional[Dict[str, Any]] = None, brute_force: bool = False, fraction_lists_to_search: Optional[float] = None, **kwargs: Any, ) -> List[Document]: """Run similarity search. Args: query: search query text. k: Number of Documents to return. Defaults to 4. filter: Filter on metadata properties, e.g. { "str_property": "foo", "int_property": 123 } brute_force: Whether to use brute force search. Defaults to False. fraction_lists_to_search: Optional percentage of lists to search, must be between 0.0 and 1.0, exclusive. If None, uses the service's default, which is 0.05. Returns: List of Documents most similar to the query vector. """ tuples = self.similarity_search_with_score( query, k, filter, brute_force, fraction_lists_to_search, **kwargs ) return [i[0] for i in tuples] def _select_relevance_score_fn(self) -> Callable[[float], float]: if self.distance_strategy == DistanceStrategy.COSINE: return BigQueryVectorSearch._cosine_relevance_score_fn else: raise ValueError( "Relevance score is not supported " f"for `{self.distance_strategy}` distance." 
) def max_marginal_relevance_search( self, query: str, k: int = DEFAULT_TOP_K, fetch_k: int = DEFAULT_TOP_K * 5, lambda_mult: float = 0.5, filter: Optional[Dict[str, Any]] = None, brute_force: bool = False, fraction_lists_to_search: Optional[float] = None, **kwargs: Any, ) -> List[Document]: """Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: query: search query text. k: Number of Documents to return. Defaults to 4. fetch_k: Number of Documents to fetch to pass to MMR algorithm. lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. filter: Filter on metadata properties, e.g. { "str_property": "foo", "int_property": 123 } brute_force: Whether to use brute force search. Defaults to False. fraction_lists_to_search: Optional percentage of lists to search, must be between 0.0 and 1.0, exclusive. If None, uses the service's default, which is 0.05. Returns: List of Documents selected by maximal marginal relevance. """ query_embedding = self.embedding_model.embed_query( # type: ignore query ) doc_tuples = self._search_with_score_and_embeddings_by_vector( query_embedding, fetch_k, filter, brute_force, fraction_lists_to_search ) doc_embeddings = [d[1] for d in doc_tuples] mmr_doc_indexes = maximal_marginal_relevance( np.array(query_embedding), doc_embeddings, lambda_mult=lambda_mult, k=k ) return [doc_tuples[i][0] for i in mmr_doc_indexes] def max_marginal_relevance_search_by_vector( self, embedding: List[float], k: int = DEFAULT_TOP_K, fetch_k: int = DEFAULT_TOP_K * 5, lambda_mult: float = 0.5, filter: Optional[Dict[str, Any]] = None, brute_force: bool = False, fraction_lists_to_search: Optional[float] = None, **kwargs: Any, ) -> List[Document]: """Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. fetch_k: Number of Documents to fetch to pass to MMR algorithm. lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. filter: Filter on metadata properties, e.g. { "str_property": "foo", "int_property": 123 } brute_force: Whether to use brute force search. Defaults to False. fraction_lists_to_search: Optional percentage of lists to search, must be between 0.0 and 1.0, exclusive. If None, uses the service's default, which is 0.05. Returns: List of Documents selected by maximal marginal relevance. 
""" doc_tuples = self._search_with_score_and_embeddings_by_vector( embedding, fetch_k, filter, brute_force, fraction_lists_to_search ) doc_embeddings = [d[1] for d in doc_tuples] mmr_doc_indexes = maximal_marginal_relevance( np.array(embedding), doc_embeddings, lambda_mult=lambda_mult, k=k ) return [doc_tuples[i][0] for i in mmr_doc_indexes] async def amax_marginal_relevance_search( self, query: str, k: int = DEFAULT_TOP_K, fetch_k: int = DEFAULT_TOP_K * 5, lambda_mult: float = 0.5, filter: Optional[Dict[str, Any]] = None, brute_force: bool = False, fraction_lists_to_search: Optional[float] = None, **kwargs: Any, ) -> List[Document]: """Return docs selected using the maximal marginal relevance.""" func = partial( self.max_marginal_relevance_search, query, k=k, fetch_k=fetch_k, lambda_mult=lambda_mult, filter=filter, brute_force=brute_force, fraction_lists_to_search=fraction_lists_to_search, **kwargs, ) return await asyncio.get_event_loop().run_in_executor(None, func) async def amax_marginal_relevance_search_by_vector( self, embedding: List[float], k: int = DEFAULT_TOP_K, fetch_k: int = DEFAULT_TOP_K * 5, lambda_mult: float = 0.5, filter: Optional[Dict[str, Any]] = None, brute_force: bool = False, fraction_lists_to_search: Optional[float] = None, **kwargs: Any, ) -> List[Document]: """Return docs selected using the maximal marginal relevance.""" return await asyncio.get_running_loop().run_in_executor( None, partial(self.max_marginal_relevance_search_by_vector, **kwargs), embedding, k, fetch_k, lambda_mult, filter, brute_force, fraction_lists_to_search, ) @classmethod def from_texts( cls: Type["BigQueryVectorSearch"], texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, **kwargs: Any, ) -> "BigQueryVectorSearch": """Return VectorStore initialized from texts and embeddings.""" vs_obj = BigQueryVectorSearch(embedding=embedding, **kwargs) vs_obj.add_texts(texts, metadatas) return vs_obj def explore_job_stats(self, job_id: str) -> Dict: """Return the statistics for a single job execution. Args: job_id: The BigQuery Job id. Returns: A dictionary of job statistics for a given job. """ return self.bq_client.get_job(job_id)._properties["statistics"]
0
lc_public_repos/langchain/libs/community/langchain_community
lc_public_repos/langchain/libs/community/langchain_community/vectorstores/vectara.py
from __future__ import annotations import json import logging import os import warnings from dataclasses import dataclass, field from hashlib import md5 from typing import Any, Iterable, Iterator, List, Optional, Tuple, Type import requests from langchain_core.callbacks.manager import ( CallbackManagerForRetrieverRun, ) from langchain_core.documents import Document from langchain_core.embeddings import Embeddings from langchain_core.runnables import Runnable, RunnableConfig from langchain_core.vectorstores import VectorStore, VectorStoreRetriever from pydantic import ConfigDict logger = logging.getLogger(__name__) MMR_RERANKER_ID = 272725718 RERANKER_MULTILINGUAL_V1_ID = 272725719 UDF_RERANKER_ID = 272725722 @dataclass class SummaryConfig: """Configuration for summary generation. is_enabled: True if summary is enabled, False otherwise max_results: maximum number of results to summarize response_lang: requested language for the summary prompt_name: name of the prompt to use for summarization (see https://docs.vectara.com/docs/learn/grounded-generation/select-a-summarizer) stream: True if the summary should be streamed, False otherwise """ is_enabled: bool = False max_results: int = 7 response_lang: str = "eng" prompt_name: str = "vectara-summary-ext-24-05-med-omni" stream: bool = False @dataclass class MMRConfig: """Configuration for Maximal Marginal Relevance (MMR) search. This will soon be deprecated in favor of RerankConfig. is_enabled: True if MMR is enabled, False otherwise mmr_k: number of results to fetch for MMR, defaults to 50 diversity_bias: number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to minimum diversity and 1 to maximum diversity. Defaults to 0.3. Note: diversity_bias is equivalent to 1 - lambda_mult where lambda_mult is the value often used in max_marginal_relevance_search(). We chose to use that since we believe it's more intuitive to the user. """ is_enabled: bool = False mmr_k: int = 50 diversity_bias: float = 0.3 @dataclass class RerankConfig: """Configuration for Reranker. reranker: "mmr", "rerank_multilingual_v1", "udf" or "none" rerank_k: number of results to fetch before reranking, defaults to 50 mmr_diversity_bias: for MMR only - a number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to minimum diversity and 1 to maximum diversity. Defaults to 0.3. Note: mmr_diversity_bias is equivalent to 1 - lambda_mult where lambda_mult is the value often used in max_marginal_relevance_search(). We chose to use that since we believe it's more intuitive to the user. user_function: for UDF only - the user function to use for reranking. """ reranker: str = "none" rerank_k: int = 50 mmr_diversity_bias: float = 0.3 user_function: str = "" @dataclass class VectaraQueryConfig: """Configuration for Vectara query. k: Number of Documents to return. Defaults to 10. lambda_val: lexical match parameter for hybrid search. filter: Dictionary of argument(s) to filter on metadata. For example a filter can be "doc.rating > 3.0 and part.lang = 'deu'". See https://docs.vectara.com/docs/search-apis/sql/filter-overview for more details. score_threshold: minimal score threshold for the result. If defined, results with score less than this value will be filtered out. 
n_sentence_before: number of sentences before the matching segment to add, defaults to 2 n_sentence_after: number of sentences after the matching segment to add, defaults to 2 rerank_config: RerankConfig configuration dataclass summary_config: SummaryConfig configuration dataclass """ k: int = 10 lambda_val: float = 0.0 filter: str = "" score_threshold: Optional[float] = None n_sentence_before: int = 2 n_sentence_after: int = 2 rerank_config: RerankConfig = field(default_factory=RerankConfig) summary_config: SummaryConfig = field(default_factory=SummaryConfig) def __init__( self, k: int = 10, lambda_val: float = 0.0, filter: str = "", score_threshold: Optional[float] = None, n_sentence_before: int = 2, n_sentence_after: int = 2, n_sentence_context: Optional[int] = None, mmr_config: Optional[MMRConfig] = None, summary_config: Optional[SummaryConfig] = None, rerank_config: Optional[RerankConfig] = None, ): self.k = k self.lambda_val = lambda_val self.filter = filter self.score_threshold = score_threshold if summary_config: self.summary_config = summary_config else: self.summary_config = SummaryConfig() # handle n_sentence_context for backward compatibility if n_sentence_context: self.n_sentence_before = n_sentence_context self.n_sentence_after = n_sentence_context warnings.warn( "n_sentence_context is deprecated. " "Please use n_sentence_before and n_sentence_after instead.", DeprecationWarning, ) else: self.n_sentence_before = n_sentence_before self.n_sentence_after = n_sentence_after # handle mmr_config for backward compatibility if rerank_config: self.rerank_config = rerank_config elif mmr_config: self.rerank_config = RerankConfig( reranker="mmr", rerank_k=mmr_config.mmr_k, mmr_diversity_bias=mmr_config.diversity_bias, ) warnings.warn( "MMRConfig is deprecated. Please use RerankConfig instead.", DeprecationWarning, ) else: self.rerank_config = RerankConfig() class Vectara(VectorStore): """`Vectara API` vector store. See https://vectara.com for more details. Example: .. code-block:: python from langchain_community.vectorstores import Vectara vectorstore = Vectara( vectara_customer_id=vectara_customer_id, vectara_corpus_id=vectara_corpus_id, vectara_api_key=vectara_api_key ) """ def __init__( self, vectara_customer_id: Optional[str] = None, vectara_corpus_id: Optional[str] = None, vectara_api_key: Optional[str] = None, vectara_api_timeout: int = 120, source: str = "langchain", ): """Initialize with Vectara API.""" self._vectara_customer_id = vectara_customer_id or os.environ.get( "VECTARA_CUSTOMER_ID" ) self._vectara_corpus_id = vectara_corpus_id or os.environ.get( "VECTARA_CORPUS_ID" ) self._vectara_api_key = vectara_api_key or os.environ.get("VECTARA_API_KEY") if ( self._vectara_customer_id is None or self._vectara_corpus_id is None or self._vectara_api_key is None ): logger.warning( "Can't find Vectara credentials, customer_id or corpus_id in " "environment." 
) else: logger.debug(f"Using corpus id {self._vectara_corpus_id}") self._source = source self._session = requests.Session() # to reuse connections adapter = requests.adapters.HTTPAdapter(max_retries=3) self._session.mount("http://", adapter) self.vectara_api_timeout = vectara_api_timeout @property def embeddings(self) -> Optional[Embeddings]: return None def _get_post_headers(self) -> dict: """Returns headers that should be attached to each post request.""" return { "x-api-key": self._vectara_api_key, "customer-id": self._vectara_customer_id, "Content-Type": "application/json", "X-Source": self._source, } def _delete_doc(self, doc_id: str) -> bool: """ Delete a document from the Vectara corpus. Args: doc_id (str): ID of the document to delete. Returns: bool: True if deletion was successful, False otherwise. """ body = { "customer_id": self._vectara_customer_id, "corpus_id": self._vectara_corpus_id, "document_id": doc_id, } response = self._session.post( "https://api.vectara.io/v1/delete-doc", data=json.dumps(body), verify=True, headers=self._get_post_headers(), timeout=self.vectara_api_timeout, ) if response.status_code != 200: logger.error( f"Delete request failed for doc_id = {doc_id} with status code " f"{response.status_code}, reason {response.reason}, text " f"{response.text}" ) return False return True def _index_doc(self, doc: dict, use_core_api: bool = False) -> str: request: dict[str, Any] = {} request["customer_id"] = self._vectara_customer_id request["corpus_id"] = self._vectara_corpus_id request["document"] = doc api_endpoint = ( "https://api.vectara.io/v1/core/index" if use_core_api else "https://api.vectara.io/v1/index" ) response = self._session.post( headers=self._get_post_headers(), url=api_endpoint, data=json.dumps(request), timeout=self.vectara_api_timeout, verify=True, ) status_code = response.status_code result = response.json() status_str = result["status"]["code"] if "status" in result else None if status_code == 409 or (status_str and status_str == "ALREADY_EXISTS"): return "E_ALREADY_EXISTS" elif status_str and (status_str == "FORBIDDEN"): return "E_NO_PERMISSIONS" else: return "E_SUCCEEDED" def delete(self, ids: Optional[List[str]] = None, **kwargs: Any) -> Optional[bool]: """Delete by vector ID or other criteria. Args: ids: List of ids to delete. Returns: Optional[bool]: True if deletion is successful, False otherwise, None if not implemented. """ if ids: success = [self._delete_doc(id) for id in ids] return all(success) else: return True def add_files( self, files_list: Iterable[str], metadatas: Optional[List[dict]] = None, **kwargs: Any, ) -> List[str]: """ Vectara provides a way to add documents directly via our API, where pre-processing and chunking occur internally in an optimal way. This method provides a way to use that API in LangChain. Args: files_list: Iterable of strings, each representing a local file path. Files could be text, HTML, PDF, markdown, doc/docx, ppt/pptx, etc. 
see API docs for full list metadatas: Optional list of metadatas associated with each file Returns: List of ids associated with each of the files indexed """ doc_ids = [] for inx, file in enumerate(files_list): if not os.path.exists(file): logger.error(f"File {file} does not exist, skipping") continue md = metadatas[inx] if metadatas else {} files: dict = { "file": (file, open(file, "rb")), "doc_metadata": json.dumps(md), } headers = self._get_post_headers() headers.pop("Content-Type") response = self._session.post( f"https://api.vectara.io/upload?c={self._vectara_customer_id}&o={self._vectara_corpus_id}&d=True", files=files, verify=True, headers=headers, timeout=self.vectara_api_timeout, ) if response.status_code == 409: doc_id = response.json()["document"]["documentId"] logger.info( f"File {file} already exists on Vectara (doc_id={doc_id}), skipping" ) elif response.status_code == 200: doc_id = response.json()["document"]["documentId"] doc_ids.append(doc_id) else: logger.info(f"Error indexing file {file}: {response.json()}") return doc_ids def add_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, doc_metadata: Optional[dict] = None, **kwargs: Any, ) -> List[str]: """Run more texts through the embeddings and add to the vectorstore. Args: texts: Iterable of strings to add to the vectorstore. metadatas: Optional list of metadatas associated with the texts. doc_metadata: optional metadata for the document This function indexes all the input text strings in the Vectara corpus as a single Vectara document, where each input text is considered a "section" and the metadata are associated with each section. if 'doc_metadata' is provided, it is associated with the Vectara document. Returns: document ID of the document added """ doc_hash = md5() for t in texts: doc_hash.update(t.encode()) doc_id = doc_hash.hexdigest() if metadatas is None: metadatas = [{} for _ in texts] if doc_metadata: doc_metadata["source"] = "langchain" else: doc_metadata = {"source": "langchain"} use_core_api = kwargs.get("use_core_api", False) section_key = "parts" if use_core_api else "section" doc = { "document_id": doc_id, "metadataJson": json.dumps(doc_metadata), section_key: [ {"text": text, "metadataJson": json.dumps(md)} for text, md in zip(texts, metadatas) ], } success_str = self._index_doc(doc, use_core_api=use_core_api) if success_str == "E_ALREADY_EXISTS": self._delete_doc(doc_id) self._index_doc(doc) elif success_str == "E_NO_PERMISSIONS": print( # noqa: T201 """No permissions to add document to Vectara. Check your corpus ID, customer ID and API key""" ) return [doc_id] def _get_query_body( self, query: str, config: VectaraQueryConfig, chat: Optional[bool] = False, chat_conv_id: Optional[str] = None, **kwargs: Any, ) -> dict: """Build the body for the API Args: query: Text to look up documents similar to. 
config: VectaraQueryConfig object Returns: A dictionary with the body of the query """ if isinstance(config.rerank_config, dict): config.rerank_config = RerankConfig(**config.rerank_config) if isinstance(config.summary_config, dict): config.summary_config = SummaryConfig(**config.summary_config) body = { "query": [ { "query": query, "start": 0, "numResults": ( config.rerank_config.rerank_k if ( config.rerank_config.reranker in ["mmr", "udf", "rerank_multilingual_v1"] ) else config.k ), "contextConfig": { "sentencesBefore": config.n_sentence_before, "sentencesAfter": config.n_sentence_after, }, "corpusKey": [ { "corpusId": self._vectara_corpus_id, "metadataFilter": config.filter, } ], } ] } if config.lambda_val > 0: body["query"][0]["corpusKey"][0]["lexicalInterpolationConfig"] = { # type: ignore "lambda": config.lambda_val } if config.rerank_config.reranker == "mmr": body["query"][0]["rerankingConfig"] = { "rerankerId": MMR_RERANKER_ID, "mmrConfig": {"diversityBias": config.rerank_config.mmr_diversity_bias}, } elif config.rerank_config.reranker == "udf": body["query"][0]["rerankingConfig"] = { "rerankerId": UDF_RERANKER_ID, "userFunction": config.rerank_config.user_function, } elif config.rerank_config.reranker == "rerank_multilingual_v1": body["query"][0]["rerankingConfig"] = { "rerankerId": RERANKER_MULTILINGUAL_V1_ID, } if config.summary_config.is_enabled: body["query"][0]["summary"] = [ { "maxSummarizedResults": config.summary_config.max_results, "responseLang": config.summary_config.response_lang, "summarizerPromptName": config.summary_config.prompt_name, } ] if chat: body["query"][0]["summary"][0]["chat"] = { # type: ignore "store": True, "conversationId": chat_conv_id, } return body def vectara_query( self, query: str, config: VectaraQueryConfig, **kwargs: Any, ) -> List[Tuple[Document, float]]: """Run a Vectara query Args: query: Text to look up documents similar to. 
config: VectaraQueryConfig object Returns: A list of k Documents matching the given query If summary is enabled, last document is the summary text with 'summary'=True """ body = self._get_query_body(query, config, **kwargs) response = self._session.post( headers=self._get_post_headers(), url="https://api.vectara.io/v1/query", data=json.dumps(body), timeout=self.vectara_api_timeout, ) if response.status_code != 200: logger.error( "Query failed %s", f"(code {response.status_code}, reason {response.reason}, details " f"{response.text})", ) return [] result = response.json() if config.score_threshold: responses = [ r for r in result["responseSet"][0]["response"] if r["score"] > config.score_threshold ] else: responses = result["responseSet"][0]["response"] documents = result["responseSet"][0]["document"] metadatas = [] for x in responses: md = {m["name"]: m["value"] for m in x["metadata"]} doc_num = x["documentIndex"] doc_md = {m["name"]: m["value"] for m in documents[doc_num]["metadata"]} if "source" not in doc_md: doc_md["source"] = "vectara" md.update(doc_md) metadatas.append(md) res = [ ( Document( page_content=x["text"], metadata=md, ), x["score"], ) for x, md in zip(responses, metadatas) ] if config.rerank_config.reranker in ["mmr", "rerank_multilingual_v1"]: res = res[: config.k] if config.summary_config.is_enabled: summary = result["responseSet"][0]["summary"][0]["text"] fcs = result["responseSet"][0]["summary"][0]["factualConsistency"]["score"] res.append( ( Document( page_content=summary, metadata={"summary": True, "fcs": fcs} ), 0.0, ) ) return res def similarity_search_with_score( self, query: str, **kwargs: Any, ) -> List[Tuple[Document, float]]: """Return Vectara documents most similar to query, along with scores. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 10. any other querying variable in VectaraQueryConfig like: - lambda_val: lexical match parameter for hybrid search. - filter: filter string - score_threshold: minimal score threshold for the result. - n_sentence_before: number of sentences before the matching segment - n_sentence_after: number of sentences after the matching segment - rerank_config: optional configuration for Reranking (see RerankConfig dataclass) - summary_config: optional configuration for summary (see SummaryConfig dataclass) Returns: List of Documents most similar to the query and score for each. """ config = VectaraQueryConfig(**kwargs) docs = self.vectara_query(query, config) return docs def similarity_search( # type: ignore[override] self, query: str, **kwargs: Any, ) -> List[Document]: """Return Vectara documents most similar to query, along with scores. Args: query: Text to look up documents similar to. any other querying variable in VectaraQueryConfig Returns: List of Documents most similar to the query """ docs_and_scores = self.similarity_search_with_score( query, **kwargs, ) return [doc for doc, _ in docs_and_scores] def max_marginal_relevance_search( # type: ignore[override] self, query: str, fetch_k: int = 50, lambda_mult: float = 0.5, **kwargs: Any, ) -> List[Document]: """Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 5. fetch_k: Number of Documents to fetch to pass to MMR algorithm. 
Defaults to 50 lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. kwargs: any other querying variable in VectaraQueryConfig Returns: List of Documents selected by maximal marginal relevance. """ kwargs["rerank_config"] = RerankConfig( reranker="mmr", rerank_k=fetch_k, mmr_diversity_bias=1 - lambda_mult ) return self.similarity_search(query, **kwargs) @classmethod def from_texts( cls: Type[Vectara], texts: List[str], embedding: Optional[Embeddings] = None, metadatas: Optional[List[dict]] = None, **kwargs: Any, ) -> Vectara: """Construct Vectara wrapper from raw documents. This is intended to be a quick way to get started. Example: .. code-block:: python from langchain_community.vectorstores import Vectara vectara = Vectara.from_texts( texts, vectara_customer_id=customer_id, vectara_corpus_id=corpus_id, vectara_api_key=api_key, ) """ # Notes: # * Vectara generates its own embeddings, so we ignore the provided # embeddings (required by interface) # * when metadatas[] are provided they are associated with each "part" # in Vectara. doc_metadata can be used to provide additional metadata # for the document itself (applies to all "texts" in this call) doc_metadata = kwargs.pop("doc_metadata", {}) vectara = cls(**kwargs) vectara.add_texts(texts, metadatas, doc_metadata=doc_metadata, **kwargs) return vectara @classmethod def from_files( cls: Type[Vectara], files: List[str], embedding: Optional[Embeddings] = None, metadatas: Optional[List[dict]] = None, **kwargs: Any, ) -> Vectara: """Construct Vectara wrapper from raw documents. This is intended to be a quick way to get started. Example: .. code-block:: python from langchain_community.vectorstores import Vectara vectara = Vectara.from_files( files_list, vectara_customer_id=customer_id, vectara_corpus_id=corpus_id, vectara_api_key=api_key, ) """ # Note: Vectara generates its own embeddings, so we ignore the provided # embeddings (required by interface) vectara = cls(**kwargs) vectara.add_files(files, metadatas) return vectara def as_rag(self, config: VectaraQueryConfig) -> VectaraRAG: """Return a Vectara RAG runnable.""" return VectaraRAG(self, config) def as_chat(self, config: VectaraQueryConfig) -> VectaraRAG: """Return a Vectara RAG runnable for chat.""" return VectaraRAG(self, config, chat=True) def as_retriever(self, **kwargs: Any) -> VectaraRetriever: """return a retriever object.""" return VectaraRetriever( vectorstore=self, config=kwargs.get("config", VectaraQueryConfig()) ) class VectaraRetriever(VectorStoreRetriever): # type: ignore[override] """Vectara Retriever class.""" vectorstore: Vectara """VectorStore to use for retrieval.""" config: VectaraQueryConfig """Configuration for this retriever.""" model_config = ConfigDict( arbitrary_types_allowed=True, ) def _get_relevant_documents( self, query: str, *, run_manager: CallbackManagerForRetrieverRun ) -> List[Document]: docs_and_scores = self.vectorstore.vectara_query(query, self.config) return [doc for doc, _ in docs_and_scores] def add_documents(self, documents: List[Document], **kwargs: Any) -> List[str]: """Add documents to vectorstore.""" return self.vectorstore.add_documents(documents, **kwargs) class VectaraRAG(Runnable): """Vectara RAG runnable. 
Parameters: vectara: Vectara object config: VectaraQueryConfig object chat: bool, default False """ def __init__( self, vectara: Vectara, config: VectaraQueryConfig, chat: bool = False ): self.vectara = vectara self.config = config self.chat = chat self.conv_id = None def stream( self, input: str, config: Optional[RunnableConfig] = None, **kwargs: Any, ) -> Iterator[dict]: """Get streaming output from Vectara RAG. Args: input: The input query config: RunnableConfig object kwargs: Any additional arguments Returns: The output dictionary with question, answer and context """ body = self.vectara._get_query_body(input, self.config, self.chat, self.conv_id) response = self.vectara._session.post( headers=self.vectara._get_post_headers(), url="https://api.vectara.io/v1/stream-query", data=json.dumps(body), timeout=self.vectara.vectara_api_timeout, stream=True, ) if response.status_code != 200: logger.error( "Query failed %s", f"(code {response.status_code}, reason {response.reason}, details " f"{response.text})", ) return responses = [] documents = [] yield {"question": input} # First chunk is the question for line in response.iter_lines(): if line: # filter out keep-alive new lines data = json.loads(line.decode("utf-8")) result = data["result"] response_set = result["responseSet"] if response_set is None: summary = result.get("summary", None) if summary is None: continue if len(summary.get("status")) > 0: logger.error( f"Summary generation failed with status " f"{summary.get('status')[0].get('statusDetail')}" ) continue # Store conversation ID for chat, if applicable chat = summary.get("chat", None) if chat and chat.get("status", None): st_code = chat["status"] logger.info(f"Chat query failed with code {st_code}") if st_code == "RESOURCE_EXHAUSTED": self.conv_id = None logger.error( "Sorry, Vectara chat turns exceeds plan limit." ) continue conv_id = chat.get("conversationId", None) if chat else None if conv_id: self.conv_id = conv_id # If FCS is provided, pull it from the JSON response if summary.get("factualConsistency", None): fcs = summary.get("factualConsistency", {}).get("score", None) yield {"fcs": fcs} continue # Yield the summary chunk chunk = str(summary["text"]) yield {"answer": chunk} else: if self.config.score_threshold: responses = [ r for r in response_set["response"] if r["score"] > self.config.score_threshold ] else: responses = response_set["response"] documents = response_set["document"] metadatas = [] for x in responses: md = {m["name"]: m["value"] for m in x["metadata"]} doc_num = x["documentIndex"] doc_md = { m["name"]: m["value"] for m in documents[doc_num]["metadata"] } if "source" not in doc_md: doc_md["source"] = "vectara" md.update(doc_md) metadatas.append(md) res = [ ( Document( page_content=x["text"], metadata=md, ), x["score"], ) for x, md in zip(responses, metadatas) ] if self.config.rerank_config.reranker in [ "mmr", "rerank_multilingual_v1", ]: res = res[: self.config.k] yield {"context": res} return def invoke( self, input: str, config: Optional[RunnableConfig] = None, **kwargs: Any, ) -> dict: res = {"answer": ""} for chunk in self.stream(input): if "context" in chunk: res["context"] = chunk["context"] elif "question" in chunk: res["question"] = chunk["question"] elif "answer" in chunk: res["answer"] += chunk["answer"] elif "fcs" in chunk: res["fcs"] = chunk["fcs"] else: logger.error(f"Unknown chunk type: {chunk}") return res
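
# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the upstream
# module). It shows how the pieces above compose: `from_texts` indexes a few
# strings, `VectaraQueryConfig` configures retrieval/summarization, and
# `as_rag` returns the VectaraRAG runnable defined above. The credential
# values are placeholders (assumptions), and a live Vectara account is
# required for this to actually run.
if __name__ == "__main__":
    customer_id = "<VECTARA_CUSTOMER_ID>"  # placeholder
    corpus_id = "<VECTARA_CORPUS_ID>"  # placeholder
    api_key = "<VECTARA_API_KEY>"  # placeholder

    vectara = Vectara.from_texts(
        ["Vectara manages its own embeddings.", "It supports RAG and chat."],
        vectara_customer_id=customer_id,
        vectara_corpus_id=corpus_id,
        vectara_api_key=api_key,
    )
    # Other SummaryConfig/RerankConfig fields are assumed to have defaults.
    config = VectaraQueryConfig(
        k=5,
        rerank_config=RerankConfig(reranker="mmr", rerank_k=50),
        summary_config=SummaryConfig(is_enabled=True),
    )
    rag = vectara.as_rag(config)
    # invoke() accumulates the streamed chunks into a single dict.
    print(rag.invoke("Who manages the embeddings?")["answer"])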
0
lc_public_repos/langchain/libs/community/langchain_community
lc_public_repos/langchain/libs/community/langchain_community/vectorstores/surrealdb.py
import asyncio
from typing import Any, Dict, Iterable, List, Optional, Tuple

import numpy as np
from langchain_core.documents import Document
from langchain_core.embeddings import Embeddings
from langchain_core.vectorstores import VectorStore

from langchain_community.vectorstores.utils import maximal_marginal_relevance

DEFAULT_K = 4  # Number of Documents to return.


class SurrealDBStore(VectorStore):
    """
    SurrealDB as Vector Store.

    To use, you should have the ``surrealdb`` python package installed.

    Args:
        embedding_function: Embedding function to use.
        dburl: SurrealDB connection url
        ns: surrealdb namespace for the vector store. (default: "langchain")
        db: surrealdb database for the vector store. (default: "database")
        collection: surrealdb collection for the vector store.
            (default: "documents")
        (optional) db_user and db_pass: surrealdb credentials

    Example:
        .. code-block:: python

            from langchain_community.vectorstores.surrealdb import SurrealDBStore
            from langchain_community.embeddings import HuggingFaceEmbeddings

            model_name = "sentence-transformers/all-mpnet-base-v2"
            embedding_function = HuggingFaceEmbeddings(model_name=model_name)
            sdb = SurrealDBStore.from_texts(
                texts=texts,
                embedding=embedding_function,
                dburl="ws://localhost:8000/rpc",
                ns="langchain",
                db="docstore",
                collection="documents",
                db_user="root",
                db_pass="root",
            )
    """

    def __init__(
        self,
        embedding_function: Embeddings,
        **kwargs: Any,
    ) -> None:
        try:
            from surrealdb import Surreal
        except ImportError as e:
            raise ImportError(
                """Cannot import from surrealdb.
                Please install with `pip install surrealdb`."""
            ) from e

        self.dburl = kwargs.pop("dburl", "ws://localhost:8000/rpc")

        if self.dburl[0:2] == "ws":
            self.sdb = Surreal(self.dburl)
        else:
            raise ValueError("Only websocket connections are supported at this time.")

        self.ns = kwargs.pop("ns", "langchain")
        self.db = kwargs.pop("db", "database")
        self.collection = kwargs.pop("collection", "documents")
        self.embedding_function = embedding_function
        self.kwargs = kwargs

    async def initialize(self) -> None:
        """
        Initialize connection to surrealdb database
        and authenticate if credentials are provided.
        """
        await self.sdb.connect()

        if "db_user" in self.kwargs and "db_pass" in self.kwargs:
            user = self.kwargs.get("db_user")
            password = self.kwargs.get("db_pass")
            await self.sdb.signin({"user": user, "pass": password})

        await self.sdb.use(self.ns, self.db)

    @property
    def embeddings(self) -> Optional[Embeddings]:
        return (
            self.embedding_function
            if isinstance(self.embedding_function, Embeddings)
            else None
        )

    async def aadd_texts(
        self,
        texts: Iterable[str],
        metadatas: Optional[List[dict]] = None,
        **kwargs: Any,
    ) -> List[str]:
        """Add list of text along with embeddings to the vector store
        asynchronously.

        Args:
            texts (Iterable[str]): collection of text to add to the database
            metadatas (Optional[List[dict]]): list of metadata dicts, one per text

        Returns:
            List of ids for the newly inserted documents
        """
        embeddings = self.embedding_function.embed_documents(list(texts))
        ids = []
        for idx, text in enumerate(texts):
            data = {"text": text, "embedding": embeddings[idx]}
            if metadatas is not None and idx < len(metadatas):
                data["metadata"] = metadatas[idx]  # type: ignore[assignment]
            else:
                data["metadata"] = []
            record = await self.sdb.create(
                self.collection,
                data,
            )
            ids.append(record[0]["id"])
        return ids

    def add_texts(
        self,
        texts: Iterable[str],
        metadatas: Optional[List[dict]] = None,
        **kwargs: Any,
    ) -> List[str]:
        """Add list of text along with embeddings to the vector store

        Args:
            texts (Iterable[str]): collection of text to add to the
database Returns: List of ids for the newly inserted documents """ async def _add_texts( texts: Iterable[str], metadatas: Optional[List[dict]] = None, **kwargs: Any, ) -> List[str]: await self.initialize() return await self.aadd_texts(texts, metadatas, **kwargs) return asyncio.run(_add_texts(texts, metadatas, **kwargs)) async def adelete( self, ids: Optional[List[str]] = None, **kwargs: Any, ) -> Optional[bool]: """Delete by document ID asynchronously. Args: ids: List of ids to delete. **kwargs: Other keyword arguments that subclasses might use. Returns: Optional[bool]: True if deletion is successful, False otherwise. """ if ids is None: await self.sdb.delete(self.collection) return True else: if isinstance(ids, str): await self.sdb.delete(ids) return True else: if isinstance(ids, list) and len(ids) > 0: _ = [await self.sdb.delete(id) for id in ids] return True return False def delete( self, ids: Optional[List[str]] = None, **kwargs: Any, ) -> Optional[bool]: """Delete by document ID. Args: ids: List of ids to delete. **kwargs: Other keyword arguments that subclasses might use. Returns: Optional[bool]: True if deletion is successful, False otherwise. """ async def _delete(ids: Optional[List[str]], **kwargs: Any) -> Optional[bool]: await self.initialize() return await self.adelete(ids=ids, **kwargs) return asyncio.run(_delete(ids, **kwargs)) async def _asimilarity_search_by_vector_with_score( self, embedding: List[float], k: int = DEFAULT_K, *, filter: Optional[Dict[str, str]] = None, **kwargs: Any, ) -> List[Tuple[Document, float, Any]]: """Run similarity search for query embedding asynchronously and return documents and scores Args: embedding (List[float]): Query embedding. k (int): Number of results to return. Defaults to 4. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. Returns: List of Documents most similar along with scores """ args = { "collection": self.collection, "embedding": embedding, "k": k, "score_threshold": kwargs.get("score_threshold", 0), } # build additional filter criteria custom_filter = "" if filter: for key in filter: # check value type if type(filter[key]) in [str, bool]: filter_value = f"'{filter[key]}'" else: filter_value = f"{filter[key]}" custom_filter += f"and metadata.{key} = {filter_value} " query = f""" select id, text, metadata, embedding, vector::similarity::cosine(embedding, $embedding) as similarity from ⟨{args["collection"]}⟩ where vector::similarity::cosine(embedding, $embedding) >= $score_threshold {custom_filter} order by similarity desc LIMIT $k; """ results = await self.sdb.query(query, args) if len(results) == 0: return [] result = results[0] if result["status"] != "OK": from surrealdb.ws import SurrealException err = result.get("result", "Unknown Error") raise SurrealException(err) return [ ( Document( page_content=doc["text"], metadata={"id": doc["id"], **(doc.get("metadata") or {})}, ), doc["similarity"], doc["embedding"], ) for doc in result["result"] ] async def asimilarity_search_with_relevance_scores( self, query: str, k: int = DEFAULT_K, *, filter: Optional[Dict[str, str]] = None, **kwargs: Any, ) -> List[Tuple[Document, float]]: """Run similarity search asynchronously and return relevance scores Args: query (str): Query k (int): Number of results to return. Defaults to 4. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. 
Returns: List of Documents most similar along with relevance scores """ query_embedding = self.embedding_function.embed_query(query) return [ (document, similarity) for document, similarity, _ in ( await self._asimilarity_search_by_vector_with_score( query_embedding, k, filter=filter, **kwargs ) ) ] def similarity_search_with_relevance_scores( self, query: str, k: int = DEFAULT_K, *, filter: Optional[Dict[str, str]] = None, **kwargs: Any, ) -> List[Tuple[Document, float]]: """Run similarity search synchronously and return relevance scores Args: query (str): Query k (int): Number of results to return. Defaults to 4. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. Returns: List of Documents most similar along with relevance scores """ async def _similarity_search_with_relevance_scores() -> ( List[Tuple[Document, float]] ): await self.initialize() return await self.asimilarity_search_with_relevance_scores( query, k, filter=filter, **kwargs ) return asyncio.run(_similarity_search_with_relevance_scores()) async def asimilarity_search_with_score( self, query: str, k: int = DEFAULT_K, *, filter: Optional[Dict[str, str]] = None, **kwargs: Any, ) -> List[Tuple[Document, float]]: """Run similarity search asynchronously and return distance scores Args: query (str): Query k (int): Number of results to return. Defaults to 4. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. Returns: List of Documents most similar along with relevance distance scores """ query_embedding = self.embedding_function.embed_query(query) return [ (document, similarity) for document, similarity, _ in ( await self._asimilarity_search_by_vector_with_score( query_embedding, k, filter=filter, **kwargs ) ) ] def similarity_search_with_score( self, query: str, k: int = DEFAULT_K, *, filter: Optional[Dict[str, str]] = None, **kwargs: Any, ) -> List[Tuple[Document, float]]: """Run similarity search synchronously and return distance scores Args: query (str): Query k (int): Number of results to return. Defaults to 4. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. Returns: List of Documents most similar along with relevance distance scores """ async def _similarity_search_with_score() -> List[Tuple[Document, float]]: await self.initialize() return await self.asimilarity_search_with_score( query, k, filter=filter, **kwargs ) return asyncio.run(_similarity_search_with_score()) async def asimilarity_search_by_vector( self, embedding: List[float], k: int = DEFAULT_K, *, filter: Optional[Dict[str, str]] = None, **kwargs: Any, ) -> List[Document]: """Run similarity search on query embedding asynchronously Args: embedding (List[float]): Query embedding k (int): Number of results to return. Defaults to 4. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. Returns: List of Documents most similar to the query """ return [ document for document, _, _ in await self._asimilarity_search_by_vector_with_score( embedding, k, filter=filter, **kwargs ) ] def similarity_search_by_vector( self, embedding: List[float], k: int = DEFAULT_K, *, filter: Optional[Dict[str, str]] = None, **kwargs: Any, ) -> List[Document]: """Run similarity search on query embedding Args: embedding (List[float]): Query embedding k (int): Number of results to return. Defaults to 4. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. 
Returns: List of Documents most similar to the query """ async def _similarity_search_by_vector() -> List[Document]: await self.initialize() return await self.asimilarity_search_by_vector( embedding, k, filter=filter, **kwargs ) return asyncio.run(_similarity_search_by_vector()) async def asimilarity_search( self, query: str, k: int = DEFAULT_K, *, filter: Optional[Dict[str, str]] = None, **kwargs: Any, ) -> List[Document]: """Run similarity search on query asynchronously Args: query (str): Query k (int): Number of results to return. Defaults to 4. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. Returns: List of Documents most similar to the query """ query_embedding = self.embedding_function.embed_query(query) return await self.asimilarity_search_by_vector( query_embedding, k, filter=filter, **kwargs ) def similarity_search( self, query: str, k: int = DEFAULT_K, *, filter: Optional[Dict[str, str]] = None, **kwargs: Any, ) -> List[Document]: """Run similarity search on query Args: query (str): Query k (int): Number of results to return. Defaults to 4. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. Returns: List of Documents most similar to the query """ async def _similarity_search() -> List[Document]: await self.initialize() return await self.asimilarity_search(query, k, filter=filter, **kwargs) return asyncio.run(_similarity_search()) async def amax_marginal_relevance_search_by_vector( self, embedding: List[float], k: int = DEFAULT_K, fetch_k: int = 20, lambda_mult: float = 0.5, *, filter: Optional[Dict[str, str]] = None, **kwargs: Any, ) -> List[Document]: """Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. fetch_k: Number of Documents to fetch to pass to MMR algorithm. lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. Returns: List of Documents selected by maximal marginal relevance. """ result = await self._asimilarity_search_by_vector_with_score( embedding, fetch_k, filter=filter, **kwargs ) # extract only document from result docs = [sub[0] for sub in result] # extract only embedding from result embeddings = [sub[-1] for sub in result] mmr_selected = maximal_marginal_relevance( np.array(embedding, dtype=np.float32), embeddings, k=k, lambda_mult=lambda_mult, ) return [docs[i] for i in mmr_selected] def max_marginal_relevance_search_by_vector( self, embedding: List[float], k: int = DEFAULT_K, fetch_k: int = 20, lambda_mult: float = 0.5, *, filter: Optional[Dict[str, str]] = None, **kwargs: Any, ) -> List[Document]: """Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. fetch_k: Number of Documents to fetch to pass to MMR algorithm. lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. 
Returns: List of Documents selected by maximal marginal relevance. """ async def _max_marginal_relevance_search_by_vector() -> List[Document]: await self.initialize() return await self.amax_marginal_relevance_search_by_vector( embedding, k, fetch_k, lambda_mult, filter=filter, **kwargs ) return asyncio.run(_max_marginal_relevance_search_by_vector()) async def amax_marginal_relevance_search( self, query: str, k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, *, filter: Optional[Dict[str, str]] = None, **kwargs: Any, ) -> List[Document]: """Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. fetch_k: Number of Documents to fetch to pass to MMR algorithm. lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. Returns: List of Documents selected by maximal marginal relevance. """ embedding = self.embedding_function.embed_query(query) docs = await self.amax_marginal_relevance_search_by_vector( embedding, k, fetch_k, lambda_mult, filter=filter, **kwargs ) return docs def max_marginal_relevance_search( self, query: str, k: int = DEFAULT_K, fetch_k: int = 20, lambda_mult: float = 0.5, *, filter: Optional[Dict[str, str]] = None, **kwargs: Any, ) -> List[Document]: """Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. fetch_k: Number of Documents to fetch to pass to MMR algorithm. lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. Returns: List of Documents selected by maximal marginal relevance. """ async def _max_marginal_relevance_search() -> List[Document]: await self.initialize() return await self.amax_marginal_relevance_search( query, k, fetch_k, lambda_mult, filter=filter, **kwargs ) return asyncio.run(_max_marginal_relevance_search()) @classmethod async def afrom_texts( cls, texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, **kwargs: Any, ) -> "SurrealDBStore": """Create SurrealDBStore from list of text asynchronously Args: texts (List[str]): list of text to vectorize and store embedding (Optional[Embeddings]): Embedding function. dburl (str): SurrealDB connection url (default: "ws://localhost:8000/rpc") ns (str): surrealdb namespace for the vector store. (default: "langchain") db (str): surrealdb database for the vector store. (default: "database") collection (str): surrealdb collection for the vector store. 
(default: "documents") (optional) db_user and db_pass: surrealdb credentials Returns: SurrealDBStore object initialized and ready for use.""" sdb = cls(embedding, **kwargs) await sdb.initialize() await sdb.aadd_texts(texts, metadatas, **kwargs) return sdb @classmethod def from_texts( cls, texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, **kwargs: Any, ) -> "SurrealDBStore": """Create SurrealDBStore from list of text Args: texts (List[str]): list of text to vectorize and store embedding (Optional[Embeddings]): Embedding function. dburl (str): SurrealDB connection url ns (str): surrealdb namespace for the vector store. (default: "langchain") db (str): surrealdb database for the vector store. (default: "database") collection (str): surrealdb collection for the vector store. (default: "documents") (optional) db_user and db_pass: surrealdb credentials Returns: SurrealDBStore object initialized and ready for use.""" sdb = asyncio.run(cls.afrom_texts(texts, embedding, metadatas, **kwargs)) return sdb
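
# ---------------------------------------------------------------------------
# Hedged usage sketch (illustrative only; not part of the upstream module).
# A minimal end-to-end run of the class above, assuming a SurrealDB server is
# reachable at ws://localhost:8000/rpc with root/root credentials.
# FakeEmbeddings from langchain_core is used purely so the example has no
# model dependency; swap in a real embedding model in practice.
if __name__ == "__main__":
    from langchain_core.embeddings.fake import FakeEmbeddings

    store = SurrealDBStore.from_texts(
        texts=["SurrealDB speaks websockets.", "Vectors live in a table."],
        embedding=FakeEmbeddings(size=16),
        dburl="ws://localhost:8000/rpc",
        ns="langchain",
        db="docstore",
        collection="documents",
        db_user="root",
        db_pass="root",
    )
    # Cosine similarity search over the stored embeddings.
    for doc in store.similarity_search("websockets", k=1):
        print(doc.page_content, doc.metadata)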
0
lc_public_repos/langchain/libs/community/langchain_community
lc_public_repos/langchain/libs/community/langchain_community/vectorstores/jaguar.py
from __future__ import annotations

import json
import logging
from typing import Any, List, Optional, Tuple

from langchain_core.documents import Document
from langchain_core.embeddings import Embeddings
from langchain_core.vectorstores import VectorStore

logger = logging.getLogger(__name__)


class Jaguar(VectorStore):
    """`Jaguar API` vector store.

    See http://www.jaguardb.com
    See http://github.com/fserv/jaguar-sdk

    Example:
        .. code-block:: python

            from langchain_community.vectorstores.jaguar import Jaguar

            vectorstore = Jaguar(
                pod='vdb',
                store='mystore',
                vector_index='v',
                vector_type='cosine_fraction_float',
                vector_dimension=1536,
                url='http://192.168.8.88:8080/fwww/',
                embedding=openai_model,
            )
    """

    def __init__(
        self,
        pod: str,
        store: str,
        vector_index: str,
        vector_type: str,
        vector_dimension: int,
        url: str,
        embedding: Embeddings,
    ):
        self._pod = pod
        self._store = store
        self._vector_index = vector_index
        self._vector_type = vector_type
        self._vector_dimension = vector_dimension

        self._embedding = embedding
        try:
            from jaguardb_http_client.JaguarHttpClient import JaguarHttpClient
        except ImportError:
            raise ImportError(
                "Could not import jaguardb-http-client python package. "
                "Please install it with `pip install -U jaguardb-http-client`"
            )

        self._jag = JaguarHttpClient(url)
        self._token = ""

    def login(
        self,
        jaguar_api_key: Optional[str] = "",
    ) -> bool:
        """
        Log in to the jaguardb server with a jaguar_api_key,
        or let self._jag find a key.

        Args:
            jaguar_api_key (str): optional API key of the user for the
                jaguardb server
        Returns:
            True if successful; False if not successful
        """
        if jaguar_api_key == "":
            jaguar_api_key = self._jag.getApiKey()
        self._jaguar_api_key = jaguar_api_key
        self._token = self._jag.login(jaguar_api_key)
        if self._token == "":
            logger.error("E0001 error init(): invalid jaguar_api_key")
            return False
        return True

    def create(
        self,
        metadata_str: str,
        text_size: int,
    ) -> None:
        """
        Create the vector store on the backend database.

        Args:
            metadata_str (str): metadata columns and their types
            text_size (int): maximum length of the text column
        Returns:
            None
        """
        podstore = self._pod + "." + self._store

        """
        source column is required.
        v:text column is required.
        """
        q = "create store "
        q += podstore
        q += f" ({self._vector_index} vector({self._vector_dimension},"
        q += f" '{self._vector_type}'),"
        q += f" source char(256), v:text char({text_size}),"
        q += metadata_str + ")"
        self.run(q)

    def run(self, query: str, withFile: bool = False) -> dict:
        """
        Run any query statement in jaguardb.

        Args:
            query (str): query statement to jaguardb
        Returns:
            An empty dict for an invalid token or an unparsable response,
            otherwise the parsed JSON result as a dict.
        """
        if self._token == "":
            logger.error(f"E0005 error run({query})")
            return {}

        resp = self._jag.post(query, self._token, withFile)
        txt = resp.text
        try:
            js = json.loads(txt)
            return js
        except Exception:
            return {}

    @property
    def embeddings(self) -> Optional[Embeddings]:
        return self._embedding

    def add_texts(  # type: ignore[override]
        self,
        texts: List[str],
        metadatas: Optional[List[dict]] = None,
        **kwargs: Any,
    ) -> List[str]:
        """
        Add texts through the embeddings and add to the vectorstore.

        Args:
            texts: list of text strings to add to the jaguar vector store.
            metadatas: Optional list of metadatas associated with the texts.
                [{"m1": "v11", "m2": "v12", "m3": "v13", "filecol": "path_file1.jpg"},
                 {"m1": "v21", "m2": "v22", "m3": "v23", "filecol": "path_file2.jpg"},
                 {"m1": "v31", "m2": "v32", "m3": "v33", "filecol": "path_file3.jpg"},
                 {"m1": "v41", "m2": "v42", "m3": "v43", "filecol": "path_file4.jpg"}]
            kwargs: vector_index=name_of_vector_index
                    file_column=name_of_file_column
        Returns:
            List of ids from adding the texts into the vectorstore
        """
        vcol = self._vector_index
        filecol = kwargs.get("file_column", "")
        text_tag = kwargs.get("text_tag", "")
        podstorevcol = self._pod + "." + self._store + "." + vcol
        q = "textcol " + podstorevcol
        js = self.run(q)
        # self.run() returns an empty dict on failure, so test for truthiness
        # rather than comparing the dict against an empty string.
        if not js:
            return []
        textcol = js["data"]

        if text_tag != "":
            tag_texts = []
            for t in texts:
                tag_texts.append(text_tag + " " + t)
            texts = tag_texts

        embeddings = self._embedding.embed_documents(list(texts))
        ids = []
        if metadatas is None:
            ### no meta and no files to upload
            i = 0
            for vec in embeddings:
                str_vec = [str(x) for x in vec]
                values_comma = ",".join(str_vec)
                podstore = self._pod + "." + self._store
                q = "insert into " + podstore + " ("
                q += vcol + "," + textcol + ") values ('" + values_comma
                txt = texts[i].replace("'", "\\'")
                q += "','" + txt + "')"
                js = self.run(q, False)
                ids.append(js["zid"])
                i += 1
        else:
            i = 0
            for vec in embeddings:
                str_vec = [str(x) for x in vec]
                nvec, vvec, filepath = self._parseMeta(metadatas[i], filecol)
                if filecol != "":
                    rc = self._jag.postFile(self._token, filepath, 1)
                    if not rc:
                        return []
                names_comma = ",".join(nvec)
                names_comma += "," + vcol  ## col1,col2,col3,vecl
                values_comma = "'" + "','".join(vvec) + "'"  ### 'va1','val2','val3'
                values_comma += ",'" + ",".join(str_vec) + "'"  ### 'v1,v2,v3'
                podstore = self._pod + "." + self._store
                q = "insert into " + podstore + " ("
                q += names_comma + "," + textcol + ") values (" + values_comma
                txt = texts[i].replace("'", "\\'")
                q += ",'" + txt + "')"
                if filecol != "":
                    js = self.run(q, True)
                else:
                    js = self.run(q, False)
                ids.append(js["zid"])
                i += 1

        return ids

    def similarity_search_with_score(
        self,
        query: str,
        k: int = 3,
        fetch_k: int = -1,
        where: Optional[str] = None,
        args: Optional[str] = None,
        metadatas: Optional[List[str]] = None,
        **kwargs: Any,
    ) -> List[Tuple[Document, float]]:
        """
        Return Jaguar documents most similar to query, along with scores.

        Args:
            query: Text to look up documents similar to.
            k: Number of Documents to return. Defaults to 3.
            fetch_k: Number of Documents to fetch before selecting the top k.
                Defaults to -1.
            where: the where clause in select similarity. For example a
                where can be "rating > 3.0 and (state = 'NV' or state = 'CA')"
            args: extra options passed to select similarity
            metadatas: metadata columns to return with each document
            kwargs: vector_index=vcol, vector_type=cosine_fraction_float
        Returns:
            List of Documents most similar to the query and score for each.
            List of Tuples of (doc, similarity_score):
                [ (doc, score), (doc, score), ...]
        """
        vcol = self._vector_index
        vtype = self._vector_type
        embeddings = self._embedding.embed_query(query)
        str_embeddings = [str(f) for f in embeddings]
        qv_comma = ",".join(str_embeddings)
        podstore = self._pod + "."
+ self._store q = ( "select similarity(" + vcol + ",'" + qv_comma + "','topk=" + str(k) + ",fetch_k=" + str(fetch_k) + ",type=" + vtype ) q += ",with_score=yes,with_text=yes" if args is not None: q += "," + args if metadatas is not None: meta = "&".join(metadatas) q += ",metadata=" + meta q += "') from " + podstore if where is not None: q += " where " + where jarr = self.run(q) if jarr is None: return [] docs_with_score = [] for js in jarr: score = js["score"] text = js["text"] zid = js["zid"] ### give metadatas md = {} md["zid"] = zid if metadatas is not None: for m in metadatas: mv = js[m] md[m] = mv doc = Document(page_content=text, metadata=md) tup = (doc, score) docs_with_score.append(tup) return docs_with_score def similarity_search( self, query: str, k: int = 3, where: Optional[str] = None, metadatas: Optional[List[str]] = None, **kwargs: Any, ) -> List[Document]: """ Return Jaguar documents most similar to query, along with scores. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 5. where: the where clause in select similarity. For example a where can be "rating > 3.0 and (state = 'NV' or state = 'CA')" Returns: List of Documents most similar to the query """ docs_and_scores = self.similarity_search_with_score( query, k=k, where=where, metadatas=metadatas, **kwargs ) return [doc for doc, _ in docs_and_scores] def is_anomalous( self, query: str, **kwargs: Any, ) -> bool: """ Detect if given text is anomalous from the dataset Args: query: Text to detect if it is anomaly Returns: True or False """ vcol = self._vector_index vtype = self._vector_type embeddings = self._embedding.embed_query(query) str_embeddings = [str(f) for f in embeddings] qv_comma = ",".join(str_embeddings) podstore = self._pod + "." + self._store q = "select anomalous(" + vcol + ", '" + qv_comma + "', 'type=" + vtype + "')" q += " from " + podstore js = self.run(q) if isinstance(js, list) and len(js) == 0: return False jd = json.loads(js[0]) if jd["anomalous"] == "YES": return True return False @classmethod def from_texts( # type: ignore[override] cls, texts: List[str], embedding: Embeddings, url: str, pod: str, store: str, vector_index: str, vector_type: str, vector_dimension: int, metadatas: Optional[List[dict]] = None, jaguar_api_key: Optional[str] = "", **kwargs: Any, ) -> Jaguar: jagstore = cls( pod, store, vector_index, vector_type, vector_dimension, url, embedding ) jagstore.login(jaguar_api_key) jagstore.clear() jagstore.add_texts(texts, metadatas, **kwargs) return jagstore def clear(self) -> None: """ Delete all records in jaguardb Args: No args Returns: None """ podstore = self._pod + "." + self._store q = "truncate store " + podstore self.run(q) def delete(self, zids: List[str], **kwargs: Any) -> None: # type: ignore[override] """ Delete records in jaguardb by a list of zero-ids Args: pod (str): name of a Pod ids (List[str]): a list of zid as string Returns: Do not return anything """ podstore = self._pod + "." + self._store for zid in zids: q = "delete from " + podstore + " where zid='" + zid + "'" self.run(q) def count(self) -> int: """ Count records of a store in jaguardb Args: no args Returns: (int) number of records in pod store """ podstore = self._pod + "." + self._store q = "select count() from " + podstore js = self.run(q) if isinstance(js, list) and len(js) == 0: return 0 jd = json.loads(js[0]) return int(jd["data"]) def drop(self) -> None: """ Drop or remove a store in jaguardb Args: no args Returns: None """ podstore = self._pod + "." 
+ self._store q = "drop store " + podstore self.run(q) def logout(self) -> None: """ Logout to cleanup resources Args: no args Returns: None """ self._jag.logout(self._token) def prt(self, msg: str) -> None: with open("/tmp/debugjaguar.log", "a") as file: print(f"msg={msg}", file=file, flush=True) def _parseMeta(self, nvmap: dict, filecol: str) -> Tuple[List[str], List[str], str]: filepath = "" if filecol == "": nvec = list(nvmap.keys()) vvec = list(nvmap.values()) else: nvec = [] vvec = [] if filecol in nvmap: nvec.append(filecol) vvec.append(nvmap[filecol]) filepath = nvmap[filecol] for k, v in nvmap.items(): if k != filecol: nvec.append(k) vvec.append(v) vvec_s = [str(e) for e in vvec] return nvec, vvec_s, filepath
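
# ---------------------------------------------------------------------------
# Hedged usage sketch (illustrative only; not part of the upstream module).
# It follows the class docstring above: a running jaguardb server at `url`
# and a discoverable API key are assumed, FakeEmbeddings stands in for a real
# embedding model, and the metadata column definition passed to create() is
# hypothetical.
if __name__ == "__main__":
    from langchain_core.embeddings.fake import FakeEmbeddings

    vectorstore = Jaguar(
        pod="vdb",
        store="mystore",
        vector_index="v",
        vector_type="cosine_fraction_float",
        vector_dimension=1536,
        url="http://192.168.8.88:8080/fwww/",  # placeholder server address
        embedding=FakeEmbeddings(size=1536),  # placeholder embedding model
    )
    if vectorstore.login():  # empty key: let the client locate one
        # Hypothetical metadata schema with two extra columns.
        vectorstore.create("category char(16), author char(32)", text_size=1024)
        vectorstore.add_texts(
            ["Jaguar stores vectors.", "It exposes a SQL-like query language."],
            metadatas=[
                {"category": "db", "author": "alice"},
                {"category": "db", "author": "bob"},
            ],
        )
        for doc, score in vectorstore.similarity_search_with_score(
            "vector storage", k=1, metadatas=["category", "author"]
        ):
            print(score, doc.page_content, doc.metadata)
        vectorstore.logout()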
0
lc_public_repos/langchain/libs/community/langchain_community
lc_public_repos/langchain/libs/community/langchain_community/vectorstores/epsilla.py
"""Wrapper around Epsilla vector database.""" from __future__ import annotations import logging import uuid from typing import TYPE_CHECKING, Any, Iterable, List, Optional, Type from langchain_core.documents import Document from langchain_core.embeddings import Embeddings from langchain_core.vectorstores import VectorStore if TYPE_CHECKING: from pyepsilla import vectordb logger = logging.getLogger() class Epsilla(VectorStore): """ Wrapper around Epsilla vector database. As a prerequisite, you need to install ``pyepsilla`` package and have a running Epsilla vector database (for example, through our docker image) See the following documentation for how to run an Epsilla vector database: https://epsilla-inc.gitbook.io/epsilladb/quick-start Args: client (Any): Epsilla client to connect to. embeddings (Embeddings): Function used to embed the texts. db_path (Optional[str]): The path where the database will be persisted. Defaults to "/tmp/langchain-epsilla". db_name (Optional[str]): Give a name to the loaded database. Defaults to "langchain_store". Example: .. code-block:: python from langchain_community.vectorstores import Epsilla from pyepsilla import vectordb client = vectordb.Client() embeddings = OpenAIEmbeddings() db_path = "/tmp/vectorstore" db_name = "langchain_store" epsilla = Epsilla(client, embeddings, db_path, db_name) """ _LANGCHAIN_DEFAULT_DB_NAME: str = "langchain_store" _LANGCHAIN_DEFAULT_DB_PATH: str = "/tmp/langchain-epsilla" _LANGCHAIN_DEFAULT_TABLE_NAME: str = "langchain_collection" def __init__( self, client: Any, embeddings: Embeddings, db_path: Optional[str] = _LANGCHAIN_DEFAULT_DB_PATH, db_name: Optional[str] = _LANGCHAIN_DEFAULT_DB_NAME, ): """Initialize with necessary components.""" try: import pyepsilla except ImportError as e: raise ImportError( "Could not import pyepsilla python package. " "Please install pyepsilla package with `pip install pyepsilla`." ) from e if not isinstance( client, (pyepsilla.vectordb.Client, pyepsilla.cloud.client.Vectordb) ): raise TypeError( "client should be an instance of pyepsilla.vectordb.Client or " f"pyepsilla.cloud.client.Vectordb, got {type(client)}" ) self._client: vectordb.Client = client self._db_name = db_name self._embeddings = embeddings self._collection_name = Epsilla._LANGCHAIN_DEFAULT_TABLE_NAME self._client.load_db(db_name=db_name, db_path=db_path) self._client.use_db(db_name=db_name) @property def embeddings(self) -> Optional[Embeddings]: return self._embeddings def use_collection(self, collection_name: str) -> None: """ Set default collection to use. Args: collection_name (str): The name of the collection. """ self._collection_name = collection_name def clear_data(self, collection_name: str = "") -> None: """ Clear data in a collection. Args: collection_name (Optional[str]): The name of the collection. If not provided, the default collection will be used. """ if not collection_name: collection_name = self._collection_name self._client.drop_table(collection_name) def get( self, collection_name: str = "", response_fields: Optional[List[str]] = None ) -> List[dict]: """Get the collection. Args: collection_name (Optional[str]): The name of the collection to retrieve data from. If not provided, the default collection will be used. response_fields (Optional[List[str]]): List of field names in the result. If not specified, all available fields will be responded. Returns: A list of the retrieved data. 
""" if not collection_name: collection_name = self._collection_name status_code, response = self._client.get( table_name=collection_name, response_fields=response_fields ) if status_code != 200: logger.error(f"Failed to get records: {response['message']}") raise Exception("Error: {}.".format(response["message"])) return response["result"] def _create_collection( self, table_name: str, embeddings: list, metadatas: Optional[list[dict]] = None ) -> None: if not embeddings: raise ValueError("Embeddings list is empty.") dim = len(embeddings[0]) fields: List[dict] = [ {"name": "id", "dataType": "INT"}, {"name": "text", "dataType": "STRING"}, {"name": "embeddings", "dataType": "VECTOR_FLOAT", "dimensions": dim}, ] if metadatas is not None: field_names = [field["name"] for field in fields] for metadata in metadatas: for key, value in metadata.items(): if key in field_names: continue d_type: str if isinstance(value, str): d_type = "STRING" elif isinstance(value, int): d_type = "INT" elif isinstance(value, float): d_type = "FLOAT" elif isinstance(value, bool): d_type = "BOOL" else: raise ValueError(f"Unsupported data type for {key}.") fields.append({"name": key, "dataType": d_type}) field_names.append(key) status_code, response = self._client.create_table( table_name, table_fields=fields ) if status_code != 200: if status_code == 409: logger.info(f"Continuing with the existing table {table_name}.") else: logger.error( f"Failed to create collection {table_name}: {response['message']}" ) raise Exception("Error: {}.".format(response["message"])) def add_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, collection_name: Optional[str] = "", drop_old: Optional[bool] = False, **kwargs: Any, ) -> List[str]: """ Embed texts and add them to the database. Args: texts (Iterable[str]): The texts to embed. metadatas (Optional[List[dict]]): Metadata dicts attached to each of the texts. Defaults to None. collection_name (Optional[str]): Which collection to use. Defaults to "langchain_collection". If provided, default collection name will be set as well. drop_old (Optional[bool]): Whether to drop the previous collection and create a new one. Defaults to False. Returns: List of ids of the added texts. """ if not collection_name: collection_name = self._collection_name else: self._collection_name = collection_name if drop_old: self._client.drop_db(db_name=collection_name) texts = list(texts) try: embeddings = self._embeddings.embed_documents(texts) except NotImplementedError: embeddings = [self._embeddings.embed_query(x) for x in texts] if len(embeddings) == 0: logger.debug("Nothing to insert, skipping.") return [] self._create_collection( table_name=collection_name, embeddings=embeddings, metadatas=metadatas ) ids = [hash(uuid.uuid4()) for _ in texts] records = [] for index, id in enumerate(ids): record = { "id": id, "text": texts[index], "embeddings": embeddings[index], } if metadatas is not None: metadata = metadatas[index].items() for key, value in metadata: record[key] = value records.append(record) status_code, response = self._client.insert( table_name=collection_name, records=records ) if status_code != 200: logger.error( f"Failed to add records to {collection_name}: {response['message']}" ) raise Exception("Error: {}.".format(response["message"])) return [str(id) for id in ids] def similarity_search( self, query: str, k: int = 4, collection_name: str = "", **kwargs: Any ) -> List[Document]: """ Return the documents that are semantically most relevant to the query. 
Args: query (str): String to query the vectorstore with. k (Optional[int]): Number of documents to return. Defaults to 4. collection_name (Optional[str]): Collection to use. Defaults to "langchain_store" or the one provided before. Returns: List of documents that are semantically most relevant to the query """ if not collection_name: collection_name = self._collection_name query_vector = self._embeddings.embed_query(query) status_code, response = self._client.query( table_name=collection_name, query_field="embeddings", query_vector=query_vector, limit=k, ) if status_code != 200: logger.error(f"Search failed: {response['message']}.") raise Exception("Error: {}.".format(response["message"])) exclude_keys = ["id", "text", "embeddings"] return list( map( lambda item: Document( page_content=item["text"], metadata={ key: item[key] for key in item if key not in exclude_keys }, ), response["result"], ) ) @classmethod def from_texts( cls: Type[Epsilla], texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, client: Any = None, db_path: Optional[str] = _LANGCHAIN_DEFAULT_DB_PATH, db_name: Optional[str] = _LANGCHAIN_DEFAULT_DB_NAME, collection_name: Optional[str] = _LANGCHAIN_DEFAULT_TABLE_NAME, drop_old: Optional[bool] = False, **kwargs: Any, ) -> Epsilla: """Create an Epsilla vectorstore from raw documents. Args: texts (List[str]): List of text data to be inserted. embeddings (Embeddings): Embedding function. client (pyepsilla.vectordb.Client): Epsilla client to connect to. metadatas (Optional[List[dict]]): Metadata for each text. Defaults to None. db_path (Optional[str]): The path where the database will be persisted. Defaults to "/tmp/langchain-epsilla". db_name (Optional[str]): Give a name to the loaded database. Defaults to "langchain_store". collection_name (Optional[str]): Which collection to use. Defaults to "langchain_collection". If provided, default collection name will be set as well. drop_old (Optional[bool]): Whether to drop the previous collection and create a new one. Defaults to False. Returns: Epsilla: Epsilla vector store. """ instance = Epsilla(client, embedding, db_path=db_path, db_name=db_name) instance.add_texts( texts, metadatas=metadatas, collection_name=collection_name, drop_old=drop_old, **kwargs, ) return instance @classmethod def from_documents( cls: Type[Epsilla], documents: List[Document], embedding: Embeddings, client: Any = None, db_path: Optional[str] = _LANGCHAIN_DEFAULT_DB_PATH, db_name: Optional[str] = _LANGCHAIN_DEFAULT_DB_NAME, collection_name: Optional[str] = _LANGCHAIN_DEFAULT_TABLE_NAME, drop_old: Optional[bool] = False, **kwargs: Any, ) -> Epsilla: """Create an Epsilla vectorstore from a list of documents. Args: texts (List[str]): List of text data to be inserted. embeddings (Embeddings): Embedding function. client (pyepsilla.vectordb.Client): Epsilla client to connect to. metadatas (Optional[List[dict]]): Metadata for each text. Defaults to None. db_path (Optional[str]): The path where the database will be persisted. Defaults to "/tmp/langchain-epsilla". db_name (Optional[str]): Give a name to the loaded database. Defaults to "langchain_store". collection_name (Optional[str]): Which collection to use. Defaults to "langchain_collection". If provided, default collection name will be set as well. drop_old (Optional[bool]): Whether to drop the previous collection and create a new one. Defaults to False. Returns: Epsilla: Epsilla vector store. 
""" texts = [doc.page_content for doc in documents] metadatas = [doc.metadata for doc in documents] return cls.from_texts( texts, embedding, metadatas=metadatas, client=client, db_path=db_path, db_name=db_name, collection_name=collection_name, drop_old=drop_old, **kwargs, )
0
lc_public_repos/langchain/libs/community/langchain_community
lc_public_repos/langchain/libs/community/langchain_community/vectorstores/analyticdb.py
from __future__ import annotations

import logging
import uuid
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Type

from sqlalchemy import REAL, Column, String, Table, create_engine, insert, text
from sqlalchemy.dialects.postgresql import ARRAY, JSON, TEXT

try:
    from sqlalchemy.orm import declarative_base
except ImportError:
    from sqlalchemy.ext.declarative import declarative_base

from langchain_core.documents import Document
from langchain_core.embeddings import Embeddings
from langchain_core.utils import get_from_dict_or_env
from langchain_core.vectorstores import VectorStore

_LANGCHAIN_DEFAULT_EMBEDDING_DIM = 1536
_LANGCHAIN_DEFAULT_COLLECTION_NAME = "langchain_document"

Base = declarative_base()  # type: Any


class AnalyticDB(VectorStore):
    """`AnalyticDB` (distributed PostgreSQL) vector store.

    AnalyticDB is a distributed, cloud-native database with full
    PostgreSQL syntax compatibility.

    - `connection_string` is a postgres connection string.
    - `embedding_function` any embedding function implementing
        `langchain.embeddings.base.Embeddings` interface.
    - `collection_name` is the name of the collection to use.
        (default: langchain_document)
        - NOTE: This is not the name of the table, but the name of the
          collection. The tables will be created when initializing the store
          (if they do not exist), so make sure the user has the right
          permissions to create tables.
    - `pre_delete_collection` if True, will delete the collection if it
        exists. (default: False)
        - Useful for testing.
    """

    def __init__(
        self,
        connection_string: str,
        embedding_function: Embeddings,
        embedding_dimension: int = _LANGCHAIN_DEFAULT_EMBEDDING_DIM,
        collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME,
        pre_delete_collection: bool = False,
        logger: Optional[logging.Logger] = None,
        engine_args: Optional[dict] = None,
    ) -> None:
        self.connection_string = connection_string
        self.embedding_function = embedding_function
        self.embedding_dimension = embedding_dimension
        self.collection_name = collection_name
        self.pre_delete_collection = pre_delete_collection
        self.logger = logger or logging.getLogger(__name__)
        self.__post_init__(engine_args)

    def __post_init__(
        self,
        engine_args: Optional[dict] = None,
    ) -> None:
        """
        Initialize the store.
""" _engine_args = engine_args or {} if ( "pool_recycle" not in _engine_args ): # Check if pool_recycle is not in _engine_args _engine_args["pool_recycle"] = ( 3600 # Set pool_recycle to 3600s if not present ) self.engine = create_engine(self.connection_string, **_engine_args) self.create_collection() @property def embeddings(self) -> Embeddings: return self.embedding_function def _select_relevance_score_fn(self) -> Callable[[float], float]: return self._euclidean_relevance_score_fn def create_table_if_not_exists(self) -> None: # Define the dynamic table Table( self.collection_name, Base.metadata, Column("id", TEXT, primary_key=True, default=uuid.uuid4), Column("embedding", ARRAY(REAL)), Column("document", String, nullable=True), Column("metadata", JSON, nullable=True), extend_existing=True, ) with self.engine.connect() as conn: with conn.begin(): # Create the table Base.metadata.create_all(conn) # Check if the index exists index_name = f"{self.collection_name}_embedding_idx" index_query = text( f""" SELECT 1 FROM pg_indexes WHERE indexname = '{index_name}'; """ ) result = conn.execute(index_query).scalar() # Create the index if it doesn't exist if not result: index_statement = text( f""" CREATE INDEX {index_name} ON {self.collection_name} USING ann(embedding) WITH ( "dim" = {self.embedding_dimension}, "hnsw_m" = 100 ); """ ) conn.execute(index_statement) def create_collection(self) -> None: if self.pre_delete_collection: self.delete_collection() self.create_table_if_not_exists() def delete_collection(self) -> None: self.logger.debug("Trying to delete collection") drop_statement = text(f"DROP TABLE IF EXISTS {self.collection_name};") with self.engine.connect() as conn: with conn.begin(): conn.execute(drop_statement) def add_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, batch_size: int = 500, **kwargs: Any, ) -> List[str]: """Run more texts through the embeddings and add to the vectorstore. Args: texts: Iterable of strings to add to the vectorstore. metadatas: Optional list of metadatas associated with the texts. kwargs: vectorstore specific parameters Returns: List of ids from adding the texts into the vectorstore. """ if ids is None: ids = [str(uuid.uuid4()) for _ in texts] embeddings = self.embedding_function.embed_documents(list(texts)) if not metadatas: metadatas = [{} for _ in texts] # Define the table schema chunks_table = Table( self.collection_name, Base.metadata, Column("id", TEXT, primary_key=True), Column("embedding", ARRAY(REAL)), Column("document", String, nullable=True), Column("metadata", JSON, nullable=True), extend_existing=True, ) chunks_table_data = [] with self.engine.connect() as conn: with conn.begin(): for document, metadata, chunk_id, embedding in zip( texts, metadatas, ids, embeddings ): chunks_table_data.append( { "id": chunk_id, "embedding": embedding, "document": document, "metadata": metadata, } ) # Execute the batch insert when the batch size is reached if len(chunks_table_data) == batch_size: conn.execute(insert(chunks_table).values(chunks_table_data)) # Clear the chunks_table_data list for the next batch chunks_table_data.clear() # Insert any remaining records that didn't make up a full batch if chunks_table_data: conn.execute(insert(chunks_table).values(chunks_table_data)) return ids def similarity_search( self, query: str, k: int = 4, filter: Optional[dict] = None, **kwargs: Any, ) -> List[Document]: """Run similarity search with AnalyticDB with distance. 
Args: query (str): Query text to search for. k (int): Number of results to return. Defaults to 4. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. Returns: List of Documents most similar to the query. """ embedding = self.embedding_function.embed_query(text=query) return self.similarity_search_by_vector( embedding=embedding, k=k, filter=filter, ) def similarity_search_with_score( self, query: str, k: int = 4, filter: Optional[dict] = None, ) -> List[Tuple[Document, float]]: """Return docs most similar to query. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. Returns: List of Documents most similar to the query and score for each """ embedding = self.embedding_function.embed_query(query) docs = self.similarity_search_with_score_by_vector( embedding=embedding, k=k, filter=filter ) return docs def similarity_search_with_score_by_vector( self, embedding: List[float], k: int = 4, filter: Optional[dict] = None, ) -> List[Tuple[Document, float]]: # Add the filter if provided try: from sqlalchemy.engine import Row except ImportError: raise ImportError( "Could not import Row from sqlalchemy.engine. " "Please 'pip install sqlalchemy>=1.4'." ) filter_condition = "" if filter is not None: conditions = [ f"metadata->>{key!r} = {value!r}" for key, value in filter.items() ] filter_condition = f"WHERE {' AND '.join(conditions)}" # Define the base query sql_query = f""" SELECT *, l2_distance(embedding, :embedding) as distance FROM {self.collection_name} {filter_condition} ORDER BY embedding <-> :embedding LIMIT :k """ # Set up the query parameters params = {"embedding": embedding, "k": k} # Execute the query and fetch the results with self.engine.connect() as conn: results: Sequence[Row] = conn.execute(text(sql_query), params).fetchall() documents_with_scores = [ ( Document( page_content=result.document, metadata=result.metadata, ), result.distance if self.embedding_function is not None else None, ) for result in results ] return documents_with_scores def similarity_search_by_vector( self, embedding: List[float], k: int = 4, filter: Optional[dict] = None, **kwargs: Any, ) -> List[Document]: """Return docs most similar to embedding vector. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. Returns: List of Documents most similar to the query vector. """ docs_and_scores = self.similarity_search_with_score_by_vector( embedding=embedding, k=k, filter=filter ) return [doc for doc, _ in docs_and_scores] def delete(self, ids: Optional[List[str]] = None, **kwargs: Any) -> Optional[bool]: """Delete by vector IDs. Args: ids: List of ids to delete. 
""" if ids is None: raise ValueError("No ids provided to delete.") # Define the table schema chunks_table = Table( self.collection_name, Base.metadata, Column("id", TEXT, primary_key=True), Column("embedding", ARRAY(REAL)), Column("document", String, nullable=True), Column("metadata", JSON, nullable=True), extend_existing=True, ) try: with self.engine.connect() as conn: with conn.begin(): delete_condition = chunks_table.c.id.in_(ids) conn.execute(chunks_table.delete().where(delete_condition)) return True except Exception as e: print("Delete operation failed:", str(e)) # noqa: T201 return False @classmethod def from_texts( cls: Type[AnalyticDB], texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, embedding_dimension: int = _LANGCHAIN_DEFAULT_EMBEDDING_DIM, collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME, ids: Optional[List[str]] = None, pre_delete_collection: bool = False, engine_args: Optional[dict] = None, **kwargs: Any, ) -> AnalyticDB: """ Return VectorStore initialized from texts and embeddings. Postgres Connection string is required Either pass it as a parameter or set the PG_CONNECTION_STRING environment variable. """ connection_string = cls.get_connection_string(kwargs) store = cls( connection_string=connection_string, collection_name=collection_name, embedding_function=embedding, embedding_dimension=embedding_dimension, pre_delete_collection=pre_delete_collection, engine_args=engine_args, ) store.add_texts(texts=texts, metadatas=metadatas, ids=ids, **kwargs) return store @classmethod def get_connection_string(cls, kwargs: Dict[str, Any]) -> str: connection_string: str = get_from_dict_or_env( data=kwargs, key="connection_string", env_key="PG_CONNECTION_STRING", ) if not connection_string: raise ValueError( "Postgres connection string is required" "Either pass it as a parameter" "or set the PG_CONNECTION_STRING environment variable." ) return connection_string @classmethod def from_documents( cls: Type[AnalyticDB], documents: List[Document], embedding: Embeddings, embedding_dimension: int = _LANGCHAIN_DEFAULT_EMBEDDING_DIM, collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME, ids: Optional[List[str]] = None, pre_delete_collection: bool = False, engine_args: Optional[dict] = None, **kwargs: Any, ) -> AnalyticDB: """ Return VectorStore initialized from documents and embeddings. Postgres Connection string is required Either pass it as a parameter or set the PG_CONNECTION_STRING environment variable. """ texts = [d.page_content for d in documents] metadatas = [d.metadata for d in documents] connection_string = cls.get_connection_string(kwargs) kwargs["connection_string"] = connection_string return cls.from_texts( texts=texts, pre_delete_collection=pre_delete_collection, embedding=embedding, embedding_dimension=embedding_dimension, metadatas=metadatas, ids=ids, collection_name=collection_name, engine_args=engine_args, **kwargs, ) @classmethod def connection_string_from_db_params( cls, driver: str, host: str, port: int, database: str, user: str, password: str, ) -> str: """Return connection string from database parameters.""" return f"postgresql+{driver}://{user}:{password}@{host}:{port}/{database}"