method_name
stringlengths
1
78
method_body
stringlengths
3
9.66k
full_code
stringlengths
31
10.7k
docstring
stringlengths
4
4.74k
test_timescalevector_with_metadatas
"""Test end to end construction and search.""" texts = ['foo', 'bar', 'baz'] metadatas = [{'page': str(i)} for i in range(len(texts))] docsearch = TimescaleVector.from_texts(texts=texts, collection_name= 'test_collection', embedding=FakeEmbeddingsWithAdaDimension(), metadatas=metadatas, service_url=SERVICE_URL, pre_delete_collection=True) output = docsearch.similarity_search('foo', k=1) assert output == [Document(page_content='foo', metadata={'page': '0'})]
def test_timescalevector_with_metadatas() ->None: """Test end to end construction and search.""" texts = ['foo', 'bar', 'baz'] metadatas = [{'page': str(i)} for i in range(len(texts))] docsearch = TimescaleVector.from_texts(texts=texts, collection_name= 'test_collection', embedding=FakeEmbeddingsWithAdaDimension(), metadatas=metadatas, service_url=SERVICE_URL, pre_delete_collection =True) output = docsearch.similarity_search('foo', k=1) assert output == [Document(page_content='foo', metadata={'page': '0'})]
Test end to end construction and search.
_components_strict
"""Get components or err.""" if self.components is None: raise ValueError('No components found in spec. ') return self.components
@property def _components_strict(self) ->Components: """Get components or err.""" if self.components is None: raise ValueError('No components found in spec. ') return self.components
Get components or err.
test_variable_key_naming
"""Test that natbot handles variable key naming correctly.""" nat_bot_chain = NatBotChain.from_llm(FakeLLM(), objective='testing', input_url_key='u', input_browser_content_key='b', output_key='c') output = nat_bot_chain.execute('foo', 'foo') assert output == 'bar'
def test_variable_key_naming() ->None: """Test that natbot handles variable key naming correctly.""" nat_bot_chain = NatBotChain.from_llm(FakeLLM(), objective='testing', input_url_key='u', input_browser_content_key='b', output_key='c') output = nat_bot_chain.execute('foo', 'foo') assert output == 'bar'
Test that natbot handles variable key naming correctly.
search_data
request = QueryRequest(table_name=self.config.table_name, namespace=self. config.namespace, vector=embedding, include_vector=True, output_fields= self.config.output_fields, filter=generate_filter_query(), top_k=k) query_result = self.ha3_engine_client.query(request) return json.loads(query_result.body)
def search_data() ->Dict[str, Any]: request = QueryRequest(table_name=self.config.table_name, namespace= self.config.namespace, vector=embedding, include_vector=True, output_fields=self.config.output_fields, filter= generate_filter_query(), top_k=k) query_result = self.ha3_engine_client.query(request) return json.loads(query_result.body)
null
_load_documents_from_ids
"""Load documents from a list of IDs.""" if not self.document_ids: raise ValueError('document_ids must be set') return [self._load_document_from_id(doc_id) for doc_id in self.document_ids]
def _load_documents_from_ids(self) ->List[Document]: """Load documents from a list of IDs.""" if not self.document_ids: raise ValueError('document_ids must be set') return [self._load_document_from_id(doc_id) for doc_id in self.document_ids ]
Load documents from a list of IDs.
_build_qstr
q_emb_str = ','.join(map(str, q_emb)) if where_str: where_str = f'PREWHERE {where_str}' else: where_str = '' q_str = f""" SELECT {self.config.column_map['text']}, {self.config.column_map['metadata']}, dist FROM {self.config.database}.{self.config.table} {where_str} ORDER BY distance({self.config.column_map['vector']}, [{q_emb_str}]) AS dist {self.dist_order} LIMIT {topk} """ return q_str
def _build_qstr(self, q_emb: List[float], topk: int, where_str: Optional[ str]=None) ->str: q_emb_str = ','.join(map(str, q_emb)) if where_str: where_str = f'PREWHERE {where_str}' else: where_str = '' q_str = f""" SELECT {self.config.column_map['text']}, {self.config.column_map['metadata']}, dist FROM {self.config.database}.{self.config.table} {where_str} ORDER BY distance({self.config.column_map['vector']}, [{q_emb_str}]) AS dist {self.dist_order} LIMIT {topk} """ return q_str
null
from_documents
"""Construct ElasticsearchStore wrapper from documents. Example: .. code-block:: python from langchain_community.vectorstores import ElasticsearchStore from langchain_community.embeddings.openai import OpenAIEmbeddings db = ElasticsearchStore.from_documents( texts, embeddings, index_name="langchain-demo", es_url="http://localhost:9200" ) Args: texts: List of texts to add to the Elasticsearch index. embedding: Embedding function to use to embed the texts. Do not provide if using a strategy that doesn't require inference. metadatas: Optional list of metadatas associated with the texts. index_name: Name of the Elasticsearch index to create. es_url: URL of the Elasticsearch instance to connect to. cloud_id: Cloud ID of the Elasticsearch instance to connect to. es_user: Username to use when connecting to Elasticsearch. es_password: Password to use when connecting to Elasticsearch. es_api_key: API key to use when connecting to Elasticsearch. es_connection: Optional pre-existing Elasticsearch connection. vector_query_field: Optional. Name of the field to store the embedding vectors in. query_field: Optional. Name of the field to store the texts in. bulk_kwargs: Optional. Additional arguments to pass to Elasticsearch bulk. """ elasticsearchStore = ElasticsearchStore._create_cls_from_kwargs(embedding= embedding, **kwargs) elasticsearchStore.add_documents(documents, bulk_kwargs=bulk_kwargs) return elasticsearchStore
@classmethod def from_documents(cls, documents: List[Document], embedding: Optional[ Embeddings]=None, bulk_kwargs: Optional[Dict]=None, **kwargs: Any ) ->'ElasticsearchStore': """Construct ElasticsearchStore wrapper from documents. Example: .. code-block:: python from langchain_community.vectorstores import ElasticsearchStore from langchain_community.embeddings.openai import OpenAIEmbeddings db = ElasticsearchStore.from_documents( texts, embeddings, index_name="langchain-demo", es_url="http://localhost:9200" ) Args: texts: List of texts to add to the Elasticsearch index. embedding: Embedding function to use to embed the texts. Do not provide if using a strategy that doesn't require inference. metadatas: Optional list of metadatas associated with the texts. index_name: Name of the Elasticsearch index to create. es_url: URL of the Elasticsearch instance to connect to. cloud_id: Cloud ID of the Elasticsearch instance to connect to. es_user: Username to use when connecting to Elasticsearch. es_password: Password to use when connecting to Elasticsearch. es_api_key: API key to use when connecting to Elasticsearch. es_connection: Optional pre-existing Elasticsearch connection. vector_query_field: Optional. Name of the field to store the embedding vectors in. query_field: Optional. Name of the field to store the texts in. bulk_kwargs: Optional. Additional arguments to pass to Elasticsearch bulk. """ elasticsearchStore = ElasticsearchStore._create_cls_from_kwargs(embedding =embedding, **kwargs) elasticsearchStore.add_documents(documents, bulk_kwargs=bulk_kwargs) return elasticsearchStore
Construct ElasticsearchStore wrapper from documents. Example: .. code-block:: python from langchain_community.vectorstores import ElasticsearchStore from langchain_community.embeddings.openai import OpenAIEmbeddings db = ElasticsearchStore.from_documents( texts, embeddings, index_name="langchain-demo", es_url="http://localhost:9200" ) Args: texts: List of texts to add to the Elasticsearch index. embedding: Embedding function to use to embed the texts. Do not provide if using a strategy that doesn't require inference. metadatas: Optional list of metadatas associated with the texts. index_name: Name of the Elasticsearch index to create. es_url: URL of the Elasticsearch instance to connect to. cloud_id: Cloud ID of the Elasticsearch instance to connect to. es_user: Username to use when connecting to Elasticsearch. es_password: Password to use when connecting to Elasticsearch. es_api_key: API key to use when connecting to Elasticsearch. es_connection: Optional pre-existing Elasticsearch connection. vector_query_field: Optional. Name of the field to store the embedding vectors in. query_field: Optional. Name of the field to store the texts in. bulk_kwargs: Optional. Additional arguments to pass to Elasticsearch bulk.
test_slack_directory_loader
"""Test Slack directory loader.""" file_path = Path(__file__).parent.parent / 'examples/slack_export.zip' loader = SlackDirectoryLoader(str(file_path)) docs = loader.load() assert len(docs) == 5
def test_slack_directory_loader() ->None: """Test Slack directory loader.""" file_path = Path(__file__).parent.parent / 'examples/slack_export.zip' loader = SlackDirectoryLoader(str(file_path)) docs = loader.load() assert len(docs) == 5
Test Slack directory loader.
available_functions
"""Map the available functions that can be invoked.""" return self.client.available_functions
@property def available_functions(self) ->List[dict]: """Map the available functions that can be invoked.""" return self.client.available_functions
Map the available functions that can be invoked.
on_tool_end
self.on_tool_end_common()
def on_tool_end(self, *args: Any, **kwargs: Any) ->Any: self.on_tool_end_common()
null
_invocation_params
return {**{'model': self.model_name}, **super()._invocation_params}
@property def _invocation_params(self) ->Dict[str, Any]: return {**{'model': self.model_name}, **super()._invocation_params}
null
embed_query
"""Return simple embeddings.""" return [float(1.0)] * (self.dimension - 1) + [float(0.0)]
def embed_query(self, text: str) ->List[float]: """Return simple embeddings.""" return [float(1.0)] * (self.dimension - 1) + [float(0.0)]
Return simple embeddings.
_dump_document_as_bytes
"""Return a bytes representation of a document.""" if not isinstance(obj, Document): raise TypeError('Expected a Document instance') return dumps(obj).encode('utf-8')
def _dump_document_as_bytes(obj: Document) ->bytes: """Return a bytes representation of a document.""" if not isinstance(obj, Document): raise TypeError('Expected a Document instance') return dumps(obj).encode('utf-8')
Return a bytes representation of a document.
markdown
"""Add a Markdown element to the container and return its index.""" kwargs = {'body': body, 'unsafe_allow_html': unsafe_allow_html, 'help': help} new_dg = self._get_dg(index).markdown(**kwargs) record = ChildRecord(ChildType.MARKDOWN, kwargs, new_dg) return self._add_record(record, index)
def markdown(self, body: SupportsStr, unsafe_allow_html: bool=False, *, help: Optional[str]=None, index: Optional[int]=None) ->int: """Add a Markdown element to the container and return its index.""" kwargs = {'body': body, 'unsafe_allow_html': unsafe_allow_html, 'help': help} new_dg = self._get_dg(index).markdown(**kwargs) record = ChildRecord(ChildType.MARKDOWN, kwargs, new_dg) return self._add_record(record, index)
Add a Markdown element to the container and return its index.
last_state
return self._integration.last_state
@property def last_state(self) ->Any: return self._integration.last_state
null
_strip_erroneous_leading_spaces
"""Strip erroneous leading spaces from text. The PaLM API will sometimes erroneously return a single leading space in all lines > 1. This function strips that space. """ has_leading_space = all(not line or line[0] == ' ' for line in text.split( '\n')[1:]) if has_leading_space: return text.replace('\n ', '\n') else: return text
def _strip_erroneous_leading_spaces(text: str) ->str: """Strip erroneous leading spaces from text. The PaLM API will sometimes erroneously return a single leading space in all lines > 1. This function strips that space. """ has_leading_space = all(not line or line[0] == ' ' for line in text. split('\n')[1:]) if has_leading_space: return text.replace('\n ', '\n') else: return text
Strip erroneous leading spaces from text. The PaLM API will sometimes erroneously return a single leading space in all lines > 1. This function strips that space.
_response_to_result
"""Converts a PaLM API response into a LangChain ChatResult.""" if not response.candidates: raise ChatGooglePalmError('ChatResponse must have at least one candidate.') generations: List[ChatGeneration] = [] for candidate in response.candidates: author = candidate.get('author') if author is None: raise ChatGooglePalmError( f'ChatResponse must have an author: {candidate}') content = _truncate_at_stop_tokens(candidate.get('content', ''), stop) if content is None: raise ChatGooglePalmError( f'ChatResponse must have a content: {candidate}') if author == 'ai': generations.append(ChatGeneration(text=content, message=AIMessage( content=content))) elif author == 'human': generations.append(ChatGeneration(text=content, message= HumanMessage(content=content))) else: generations.append(ChatGeneration(text=content, message=ChatMessage (role=author, content=content))) return ChatResult(generations=generations)
def _response_to_result(response: genai.types.ChatResponse, stop: Optional[ List[str]]) ->ChatResult: """Converts a PaLM API response into a LangChain ChatResult.""" if not response.candidates: raise ChatGooglePalmError( 'ChatResponse must have at least one candidate.') generations: List[ChatGeneration] = [] for candidate in response.candidates: author = candidate.get('author') if author is None: raise ChatGooglePalmError( f'ChatResponse must have an author: {candidate}') content = _truncate_at_stop_tokens(candidate.get('content', ''), stop) if content is None: raise ChatGooglePalmError( f'ChatResponse must have a content: {candidate}') if author == 'ai': generations.append(ChatGeneration(text=content, message= AIMessage(content=content))) elif author == 'human': generations.append(ChatGeneration(text=content, message= HumanMessage(content=content))) else: generations.append(ChatGeneration(text=content, message= ChatMessage(role=author, content=content))) return ChatResult(generations=generations)
Converts a PaLM API response into a LangChain ChatResult.
load
return list(self.lazy_load())
def load(self) ->List[Document]: return list(self.lazy_load())
null
from_embeddings
"""Construct PGVector wrapper from raw documents and pre- generated embeddings. Return VectorStore initialized from documents and embeddings. Postgres connection string is required "Either pass it as a parameter or set the PGVECTOR_CONNECTION_STRING environment variable. Example: .. code-block:: python from langchain_community.vectorstores import PGVector from langchain_community.embeddings import OpenAIEmbeddings embeddings = OpenAIEmbeddings() text_embeddings = embeddings.embed_documents(texts) text_embedding_pairs = list(zip(texts, text_embeddings)) faiss = PGVector.from_embeddings(text_embedding_pairs, embeddings) """ texts = [t[0] for t in text_embeddings] embeddings = [t[1] for t in text_embeddings] return cls.__from(texts, embeddings, embedding, metadatas=metadatas, ids= ids, collection_name=collection_name, distance_strategy= distance_strategy, pre_delete_collection=pre_delete_collection, **kwargs)
@classmethod def from_embeddings(cls, text_embeddings: List[Tuple[str, List[float]]], embedding: Embeddings, metadatas: Optional[List[dict]]=None, collection_name: str=_LANGCHAIN_DEFAULT_COLLECTION_NAME, distance_strategy: DistanceStrategy=DEFAULT_DISTANCE_STRATEGY, ids: Optional[List[str]]=None, pre_delete_collection: bool=False, **kwargs: Any ) ->PGVector: """Construct PGVector wrapper from raw documents and pre- generated embeddings. Return VectorStore initialized from documents and embeddings. Postgres connection string is required "Either pass it as a parameter or set the PGVECTOR_CONNECTION_STRING environment variable. Example: .. code-block:: python from langchain_community.vectorstores import PGVector from langchain_community.embeddings import OpenAIEmbeddings embeddings = OpenAIEmbeddings() text_embeddings = embeddings.embed_documents(texts) text_embedding_pairs = list(zip(texts, text_embeddings)) faiss = PGVector.from_embeddings(text_embedding_pairs, embeddings) """ texts = [t[0] for t in text_embeddings] embeddings = [t[1] for t in text_embeddings] return cls.__from(texts, embeddings, embedding, metadatas=metadatas, ids=ids, collection_name=collection_name, distance_strategy= distance_strategy, pre_delete_collection=pre_delete_collection, ** kwargs)
Construct PGVector wrapper from raw documents and pre- generated embeddings. Return VectorStore initialized from documents and embeddings. Postgres connection string is required "Either pass it as a parameter or set the PGVECTOR_CONNECTION_STRING environment variable. Example: .. code-block:: python from langchain_community.vectorstores import PGVector from langchain_community.embeddings import OpenAIEmbeddings embeddings = OpenAIEmbeddings() text_embeddings = embeddings.embed_documents(texts) text_embedding_pairs = list(zip(texts, text_embeddings)) faiss = PGVector.from_embeddings(text_embedding_pairs, embeddings)
__init__
"""Initialize the JSONLoader. Args: file_path (Union[str, Path]): The path to the JSON or JSON Lines file. jq_schema (str): The jq schema to use to extract the data or text from the JSON. content_key (str): The key to use to extract the content from the JSON if the jq_schema results to a list of objects (dict). metadata_func (Callable[Dict, Dict]): A function that takes in the JSON object extracted by the jq_schema and the default metadata and returns a dict of the updated metadata. text_content (bool): Boolean flag to indicate whether the content is in string format, default to True. json_lines (bool): Boolean flag to indicate whether the input is in JSON Lines format. """ try: import jq except ImportError: raise ImportError( 'jq package not found, please install it with `pip install jq`') self.file_path = Path(file_path).resolve() self._jq_schema = jq.compile(jq_schema) self._content_key = content_key self._metadata_func = metadata_func self._text_content = text_content self._json_lines = json_lines
def __init__(self, file_path: Union[str, Path], jq_schema: str, content_key: Optional[str]=None, metadata_func: Optional[Callable[[Dict, Dict], Dict ]]=None, text_content: bool=True, json_lines: bool=False): """Initialize the JSONLoader. Args: file_path (Union[str, Path]): The path to the JSON or JSON Lines file. jq_schema (str): The jq schema to use to extract the data or text from the JSON. content_key (str): The key to use to extract the content from the JSON if the jq_schema results to a list of objects (dict). metadata_func (Callable[Dict, Dict]): A function that takes in the JSON object extracted by the jq_schema and the default metadata and returns a dict of the updated metadata. text_content (bool): Boolean flag to indicate whether the content is in string format, default to True. json_lines (bool): Boolean flag to indicate whether the input is in JSON Lines format. """ try: import jq except ImportError: raise ImportError( 'jq package not found, please install it with `pip install jq`') self.file_path = Path(file_path).resolve() self._jq_schema = jq.compile(jq_schema) self._content_key = content_key self._metadata_func = metadata_func self._text_content = text_content self._json_lines = json_lines
Initialize the JSONLoader. Args: file_path (Union[str, Path]): The path to the JSON or JSON Lines file. jq_schema (str): The jq schema to use to extract the data or text from the JSON. content_key (str): The key to use to extract the content from the JSON if the jq_schema results to a list of objects (dict). metadata_func (Callable[Dict, Dict]): A function that takes in the JSON object extracted by the jq_schema and the default metadata and returns a dict of the updated metadata. text_content (bool): Boolean flag to indicate whether the content is in string format, default to True. json_lines (bool): Boolean flag to indicate whether the input is in JSON Lines format.
test_promptlayer_chat_openai_generate
"""Test PromptLayerChatOpenAI wrapper with generate.""" chat = PromptLayerChatOpenAI(max_tokens=10, n=2) message = HumanMessage(content='Hello') response = chat.generate([[message], [message]]) assert isinstance(response, LLMResult) assert len(response.generations) == 2 for generations in response.generations: assert len(generations) == 2 for generation in generations: assert isinstance(generation, ChatGeneration) assert isinstance(generation.text, str) assert generation.text == generation.message.content
def test_promptlayer_chat_openai_generate() ->None: """Test PromptLayerChatOpenAI wrapper with generate.""" chat = PromptLayerChatOpenAI(max_tokens=10, n=2) message = HumanMessage(content='Hello') response = chat.generate([[message], [message]]) assert isinstance(response, LLMResult) assert len(response.generations) == 2 for generations in response.generations: assert len(generations) == 2 for generation in generations: assert isinstance(generation, ChatGeneration) assert isinstance(generation.text, str) assert generation.text == generation.message.content
Test PromptLayerChatOpenAI wrapper with generate.
test_get_normal_transaction
account_address = '0x9dd134d14d1e65f84b706d6f205cd5b1cd03a46b' loader = EtherscanLoader(account_address) result = loader.load() assert len(result) > 0, 'No transactions returned'
@pytest.mark.skipif(not etherscan_key_set, reason= 'Etherscan API key not provided.') def test_get_normal_transaction() ->None: account_address = '0x9dd134d14d1e65f84b706d6f205cd5b1cd03a46b' loader = EtherscanLoader(account_address) result = loader.load() assert len(result) > 0, 'No transactions returned'
null
is_lc_serializable
return False
@classmethod def is_lc_serializable(cls) ->bool: return False
null
test_action_w_namespace_w_emb2
str1 = 'test1' str2 = 'test2' str3 = 'test3' encoded_str1 = base.stringify_embedding(list(encoded_keyword + str1)) encoded_str2 = base.stringify_embedding(list(encoded_keyword + str2)) encoded_str3 = base.stringify_embedding(list(encoded_keyword + str3)) expected = [{'test_namespace1': encoded_str1}, {'test_namespace2': encoded_str2}, {'test_namespace3': encoded_str3}] assert base.embed(base.Embed([{'test_namespace1': str1}, {'test_namespace2': str2}, {'test_namespace3': str3}]), MockEncoder()) == expected expected_embed_and_keep = [{'test_namespace1': str1 + ' ' + encoded_str1}, {'test_namespace2': str2 + ' ' + encoded_str2}, {'test_namespace3': str3 + ' ' + encoded_str3}] assert base.embed(base.EmbedAndKeep([{'test_namespace1': str1}, { 'test_namespace2': str2}, {'test_namespace3': str3}]), MockEncoder() ) == expected_embed_and_keep
@pytest.mark.requires('vowpal_wabbit_next') def test_action_w_namespace_w_emb2() ->None: str1 = 'test1' str2 = 'test2' str3 = 'test3' encoded_str1 = base.stringify_embedding(list(encoded_keyword + str1)) encoded_str2 = base.stringify_embedding(list(encoded_keyword + str2)) encoded_str3 = base.stringify_embedding(list(encoded_keyword + str3)) expected = [{'test_namespace1': encoded_str1}, {'test_namespace2': encoded_str2}, {'test_namespace3': encoded_str3}] assert base.embed(base.Embed([{'test_namespace1': str1}, { 'test_namespace2': str2}, {'test_namespace3': str3}]), MockEncoder() ) == expected expected_embed_and_keep = [{'test_namespace1': str1 + ' ' + encoded_str1}, {'test_namespace2': str2 + ' ' + encoded_str2}, { 'test_namespace3': str3 + ' ' + encoded_str3}] assert base.embed(base.EmbedAndKeep([{'test_namespace1': str1}, { 'test_namespace2': str2}, {'test_namespace3': str3}]), MockEncoder() ) == expected_embed_and_keep
null
replace_glob
for file in parent.glob(glob): if not file.is_file(): continue replace_file(file, replacements)
def replace_glob(parent: Path, glob: str, replacements: Dict[str, str]) ->None: for file in parent.glob(glob): if not file.is_file(): continue replace_file(file, replacements)
null
__init__
np = guard_import('numpy') sklearn_neighbors = guard_import('sklearn.neighbors', pip_name='scikit-learn') self._np = np self._neighbors = sklearn_neighbors.NearestNeighbors(metric=metric, **kwargs) self._neighbors_fitted = False self._embedding_function = embedding self._persist_path = persist_path self._serializer: Optional[BaseSerializer] = None if self._persist_path is not None: serializer_cls = SERIALIZER_MAP[serializer] self._serializer = serializer_cls(persist_path=self._persist_path) self._embeddings: List[List[float]] = [] self._texts: List[str] = [] self._metadatas: List[dict] = [] self._ids: List[str] = [] self._embeddings_np: Any = np.asarray([]) if self._persist_path is not None and os.path.isfile(self._persist_path): self._load()
def __init__(self, embedding: Embeddings, *, persist_path: Optional[str]= None, serializer: Literal['json', 'bson', 'parquet']='json', metric: str='cosine', **kwargs: Any) ->None: np = guard_import('numpy') sklearn_neighbors = guard_import('sklearn.neighbors', pip_name= 'scikit-learn') self._np = np self._neighbors = sklearn_neighbors.NearestNeighbors(metric=metric, ** kwargs) self._neighbors_fitted = False self._embedding_function = embedding self._persist_path = persist_path self._serializer: Optional[BaseSerializer] = None if self._persist_path is not None: serializer_cls = SERIALIZER_MAP[serializer] self._serializer = serializer_cls(persist_path=self._persist_path) self._embeddings: List[List[float]] = [] self._texts: List[str] = [] self._metadatas: List[dict] = [] self._ids: List[str] = [] self._embeddings_np: Any = np.asarray([]) if self._persist_path is not None and os.path.isfile(self._persist_path): self._load()
null
_embed_with_retry
return embeddings.embed(*args, **kwargs)
@retry_decorator def _embed_with_retry(*args: Any, **kwargs: Any) ->Any: return embeddings.embed(*args, **kwargs)
null
_import_deeplake
from langchain_community.vectorstores.deeplake import DeepLake return DeepLake
def _import_deeplake() ->Any: from langchain_community.vectorstores.deeplake import DeepLake return DeepLake
null
embeddings
if self.using_table_name in self.table2embeddings: return self.table2embeddings[self.using_table_name] return None
@property def embeddings(self) ->Optional[Embeddings]: if self.using_table_name in self.table2embeddings: return self.table2embeddings[self.using_table_name] return None
null
_default_params
"""Get the default parameters for calling textgen.""" return {'max_new_tokens': self.max_new_tokens, 'do_sample': self.do_sample, 'temperature': self.temperature, 'top_p': self.top_p, 'typical_p': self .typical_p, 'epsilon_cutoff': self.epsilon_cutoff, 'eta_cutoff': self. eta_cutoff, 'repetition_penalty': self.repetition_penalty, 'top_k': self.top_k, 'min_length': self.min_length, 'no_repeat_ngram_size': self .no_repeat_ngram_size, 'num_beams': self.num_beams, 'penalty_alpha': self.penalty_alpha, 'length_penalty': self.length_penalty, 'early_stopping': self.early_stopping, 'seed': self.seed, 'add_bos_token': self.add_bos_token, 'truncation_length': self. truncation_length, 'ban_eos_token': self.ban_eos_token, 'skip_special_tokens': self.skip_special_tokens, 'stopping_strings': self.stopping_strings}
@property def _default_params(self) ->Dict[str, Any]: """Get the default parameters for calling textgen.""" return {'max_new_tokens': self.max_new_tokens, 'do_sample': self. do_sample, 'temperature': self.temperature, 'top_p': self.top_p, 'typical_p': self.typical_p, 'epsilon_cutoff': self.epsilon_cutoff, 'eta_cutoff': self.eta_cutoff, 'repetition_penalty': self. repetition_penalty, 'top_k': self.top_k, 'min_length': self. min_length, 'no_repeat_ngram_size': self.no_repeat_ngram_size, 'num_beams': self.num_beams, 'penalty_alpha': self.penalty_alpha, 'length_penalty': self.length_penalty, 'early_stopping': self. early_stopping, 'seed': self.seed, 'add_bos_token': self. add_bos_token, 'truncation_length': self.truncation_length, 'ban_eos_token': self.ban_eos_token, 'skip_special_tokens': self. skip_special_tokens, 'stopping_strings': self.stopping_strings}
Get the default parameters for calling textgen.
_get_relevant_documents
return [Document(page_content='foo'), Document(page_content='bar')]
def _get_relevant_documents(self, query: str, *, callbacks: Callbacks=None, tags: Optional[List[str]]=None, metadata: Optional[Dict[str, Any]]=None, **kwargs: Any) ->List[Document]: return [Document(page_content='foo'), Document(page_content='bar')]
null
load
"""Load webpages into Documents.""" soup = self.scrape() title = soup.title.text lyrics = soup.find_all('div', {'class': ''})[2].text text = title + lyrics metadata = {'source': self.web_path} return [Document(page_content=text, metadata=metadata)]
def load(self) ->List[Document]: """Load webpages into Documents.""" soup = self.scrape() title = soup.title.text lyrics = soup.find_all('div', {'class': ''})[2].text text = title + lyrics metadata = {'source': self.web_path} return [Document(page_content=text, metadata=metadata)]
Load webpages into Documents.
as_field
from redis.commands.search.field import VectorField field_data = super()._fields() field_data.update({'M': self.m, 'EF_CONSTRUCTION': self.ef_construction, 'EF_RUNTIME': self.ef_runtime, 'EPSILON': self.epsilon}) return VectorField(self.name, self.algorithm, field_data)
def as_field(self) ->VectorField: from redis.commands.search.field import VectorField field_data = super()._fields() field_data.update({'M': self.m, 'EF_CONSTRUCTION': self.ef_construction, 'EF_RUNTIME': self.ef_runtime, 'EPSILON': self.epsilon}) return VectorField(self.name, self.algorithm, field_data)
null
test_auto_scorer_with_user_defined_llm
llm, PROMPT = setup() scorer_llm = FakeListChatModel(responses=['300']) chain = pick_best_chain.PickBest.from_llm(llm=llm, prompt=PROMPT, selection_scorer=rl_chain.AutoSelectionScorer(llm=scorer_llm), feature_embedder=pick_best_chain.PickBestFeatureEmbedder(auto_embed= False, model=MockEncoder())) response = chain.run(User=rl_chain.BasedOn('Context'), action=rl_chain. ToSelectFrom(['0', '1', '2'])) assert response['response'] == 'hey' selection_metadata = response['selection_metadata'] assert selection_metadata.selected.score == 300.0
@pytest.mark.requires('vowpal_wabbit_next', 'sentence_transformers') def test_auto_scorer_with_user_defined_llm() ->None: llm, PROMPT = setup() scorer_llm = FakeListChatModel(responses=['300']) chain = pick_best_chain.PickBest.from_llm(llm=llm, prompt=PROMPT, selection_scorer=rl_chain.AutoSelectionScorer(llm=scorer_llm), feature_embedder=pick_best_chain.PickBestFeatureEmbedder(auto_embed =False, model=MockEncoder())) response = chain.run(User=rl_chain.BasedOn('Context'), action=rl_chain. ToSelectFrom(['0', '1', '2'])) assert response['response'] == 'hey' selection_metadata = response['selection_metadata'] assert selection_metadata.selected.score == 300.0
null
from_llm
"""Get the response parser.""" output_parser = APIResponderOutputParser() prompt = PromptTemplate(template=RESPONSE_TEMPLATE, output_parser= output_parser, input_variables=['response', 'instructions']) return cls(prompt=prompt, llm=llm, verbose=verbose, **kwargs)
@classmethod def from_llm(cls, llm: BaseLanguageModel, verbose: bool=True, **kwargs: Any ) ->LLMChain: """Get the response parser.""" output_parser = APIResponderOutputParser() prompt = PromptTemplate(template=RESPONSE_TEMPLATE, output_parser= output_parser, input_variables=['response', 'instructions']) return cls(prompt=prompt, llm=llm, verbose=verbose, **kwargs)
Get the response parser.
test_valid_code_validation
"""Test the validator.""" PALChain.validate_code(_SAMPLE_CODE_1, _FULL_CODE_VALIDATIONS)
def test_valid_code_validation() ->None: """Test the validator.""" PALChain.validate_code(_SAMPLE_CODE_1, _FULL_CODE_VALIDATIONS)
Test the validator.
con_str
file_path = tmp_path / 'db.sqlite3' con_str = f'sqlite:///{file_path}' return con_str
@pytest.fixture()
def con_str(tmp_path: Path) ->str:
    """Return a SQLite connection string rooted in pytest's tmp directory."""
    db_file = tmp_path / 'db.sqlite3'
    return f'sqlite:///{db_file}'
null
get_num_rows
"""Gets the number of "feasible" rows for the DataFrame""" try: import psutil except ImportError as e: raise ImportError( 'psutil not installed. Please install it with `pip install psutil`.' ) from e row = self.df.limit(1).collect()[0] estimated_row_size = sys.getsizeof(row) mem_info = psutil.virtual_memory() available_memory = mem_info.available max_num_rows = int(available_memory / estimated_row_size * self. fraction_of_memory) return min(max_num_rows, self.df.count()), max_num_rows
def get_num_rows(self) ->Tuple[int, int]:
    """Gets the number of "feasible" rows for the DataFrame"""
    try:
        import psutil
    except ImportError as e:
        raise ImportError(
            'psutil not installed. Please install it with `pip install psutil`.'
            ) from e
    # Estimate the per-row memory cost from a single sampled row.
    sample_row = self.df.limit(1).collect()[0]
    row_bytes = sys.getsizeof(sample_row)
    # Cap the row count by the configured fraction of available memory.
    free_bytes = psutil.virtual_memory().available
    max_num_rows = int(free_bytes / row_bytes * self.fraction_of_memory)
    return min(max_num_rows, self.df.count()), max_num_rows
Gets the number of "feasible" rows for the DataFrame
hybrid_search_with_score
"""Return docs most similar to query with an hybrid query. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. Returns: List of Documents most similar to the query and score for each """ from azure.search.documents.models import Vector results = self.client.search(search_text=query, vectors=[Vector(value=np. array(self.embedding_function(query), dtype=np.float32).tolist(), k=k, fields=FIELDS_CONTENT_VECTOR)], filter=filters, top=k) docs = [(Document(page_content=result.pop(FIELDS_CONTENT), metadata={**{ FIELDS_ID: result.pop(FIELDS_ID)} if FIELDS_ID in result else {}, ** json.loads(result[FIELDS_METADATA]) if FIELDS_METADATA in result else { k: v for k, v in result.items() if k != FIELDS_CONTENT_VECTOR}}), float (result['@search.score'])) for result in results] return docs
def hybrid_search_with_score(self, query: str, k: int=4, filters: Optional[
    str]=None) ->List[Tuple[Document, float]]:
    """Return docs most similar to query with an hybrid query.

    Args:
        query: Text to look up documents similar to.
        k: Number of Documents to return. Defaults to 4.

    Returns:
        List of Documents most similar to the query and score for each
    """
    from azure.search.documents.models import Vector
    query_vector = np.array(self.embedding_function(query), dtype=np.float32
        ).tolist()
    results = self.client.search(search_text=query, vectors=[Vector(value=
        query_vector, k=k, fields=FIELDS_CONTENT_VECTOR)], filter=filters,
        top=k)
    scored_docs: List[Tuple[Document, float]] = []
    for result in results:
        # Pop the content first so the metadata fallback below never
        # includes it.
        content = result.pop(FIELDS_CONTENT)
        metadata = {}
        if FIELDS_ID in result:
            metadata[FIELDS_ID] = result.pop(FIELDS_ID)
        if FIELDS_METADATA in result:
            # Stored metadata wins when the dedicated field exists.
            metadata.update(json.loads(result[FIELDS_METADATA]))
        else:
            # Otherwise treat every remaining field (minus the raw vector)
            # as metadata.
            metadata.update({key: value for key, value in result.items() if
                key != FIELDS_CONTENT_VECTOR})
        scored_docs.append((Document(page_content=content, metadata=
            metadata), float(result['@search.score'])))
    return scored_docs
Return docs most similar to query with an hybrid query. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. Returns: List of Documents most similar to the query and score for each
test_load_success
docs = retriever.get_relevant_documents(query='1605.08386') assert len(docs) == 1 assert_docs(docs, all_meta=False)
def test_load_success(retriever: ArxivRetriever) ->None:
    """A single arXiv id query should yield exactly one valid document."""
    documents = retriever.get_relevant_documents(query='1605.08386')
    assert len(documents) == 1
    assert_docs(documents, all_meta=False)
null
predict_messages
text = get_buffer_string(messages) if stop is None: _stop = None else: _stop = list(stop) content = self(text, stop=_stop, **kwargs) return AIMessage(content=content)
def predict_messages(self, messages: List[BaseMessage], *, stop: Optional[
    Sequence[str]]=None, **kwargs: Any) ->BaseMessage:
    """Flatten the messages into text, call the LLM, and wrap the reply."""
    prompt_text = get_buffer_string(messages)
    # The underlying call expects a list (or None), not any sequence.
    stop_tokens = None if stop is None else list(stop)
    reply = self(prompt_text, stop=stop_tokens, **kwargs)
    return AIMessage(content=reply)
null
mock_lakefs_client_no_presign_not_local
with patch('langchain_community.document_loaders.lakefs.LakeFSClient' ) as mock_lakefs_client: mock_lakefs_client.return_value.ls_objects.return_value = [( 'path_bla.txt', 'https://physical_address_bla')] mock_lakefs_client.return_value.is_presign_supported.return_value = False yield mock_lakefs_client.return_value
@pytest.fixture
def mock_lakefs_client_no_presign_not_local() ->Any:
    """Patch LakeFSClient: one remote object listed, presign unsupported."""
    with patch('langchain_community.document_loaders.lakefs.LakeFSClient'
        ) as mock_client:
        instance = mock_client.return_value
        instance.ls_objects.return_value = [('path_bla.txt',
            'https://physical_address_bla')]
        instance.is_presign_supported.return_value = False
        yield instance
null
get_sync
"""Get the equivalent sync RunManager. Returns: CallbackManagerForToolRun: The sync RunManager. """ return CallbackManagerForToolRun(run_id=self.run_id, handlers=self.handlers, inheritable_handlers=self.inheritable_handlers, parent_run_id=self. parent_run_id, tags=self.tags, inheritable_tags=self.inheritable_tags, metadata=self.metadata, inheritable_metadata=self.inheritable_metadata)
def get_sync(self) ->CallbackManagerForToolRun:
    """Get the equivalent sync RunManager.

    Returns:
        CallbackManagerForToolRun: The sync RunManager.
    """
    # Mirror every field of this async manager onto the sync variant.
    return CallbackManagerForToolRun(
        run_id=self.run_id,
        handlers=self.handlers,
        inheritable_handlers=self.inheritable_handlers,
        parent_run_id=self.parent_run_id,
        tags=self.tags,
        inheritable_tags=self.inheritable_tags,
        metadata=self.metadata,
        inheritable_metadata=self.inheritable_metadata,
    )
Get the equivalent sync RunManager. Returns: CallbackManagerForToolRun: The sync RunManager.
test_visit_structured_query
query = 'What is the capital of France?' structured_query = StructuredQuery(query=query, filter=None, limit=None) expected: Tuple[str, Dict] = (query, {}) actual = DEFAULT_TRANSLATOR.visit_structured_query(structured_query) assert expected == actual comp = Comparison(comparator=Comparator.LT, attribute='foo', value=1) expected = query, {'filter': '( doc.foo < 1 )'} structured_query = StructuredQuery(query=query, filter=comp, limit=None) actual = DEFAULT_TRANSLATOR.visit_structured_query(structured_query) assert expected == actual op = Operation(operator=Operator.AND, arguments=[Comparison(comparator= Comparator.LT, attribute='foo', value=2), Comparison(comparator= Comparator.EQ, attribute='bar', value='baz'), Comparison(comparator= Comparator.LT, attribute='abc', value=1)]) structured_query = StructuredQuery(query=query, filter=op, limit=None) expected = query, {'filter': "( ( doc.foo < 2 ) and ( doc.bar = 'baz' ) and ( doc.abc < 1 ) )"} actual = DEFAULT_TRANSLATOR.visit_structured_query(structured_query) assert expected == actual
def test_visit_structured_query() ->None:
    """Translate queries with no filter, one comparison, and an AND chain."""
    query = 'What is the capital of France?'

    # No filter: an empty kwargs dict comes back.
    no_filter = StructuredQuery(query=query, filter=None, limit=None)
    assert DEFAULT_TRANSLATOR.visit_structured_query(no_filter) == (query, {})

    # A single comparison becomes one parenthesized AQL clause.
    single = StructuredQuery(query=query, filter=Comparison(comparator=
        Comparator.LT, attribute='foo', value=1), limit=None)
    assert DEFAULT_TRANSLATOR.visit_structured_query(single) == (query, {
        'filter': '( doc.foo < 1 )'})

    # An AND of three comparisons is joined with 'and'.
    conjunction = Operation(operator=Operator.AND, arguments=[Comparison(
        comparator=Comparator.LT, attribute='foo', value=2), Comparison(
        comparator=Comparator.EQ, attribute='bar', value='baz'),
        Comparison(comparator=Comparator.LT, attribute='abc', value=1)])
    combined = StructuredQuery(query=query, filter=conjunction, limit=None)
    assert DEFAULT_TRANSLATOR.visit_structured_query(combined) == (query, {
        'filter':
        "( ( doc.foo < 2 ) and ( doc.bar = 'baz' ) and ( doc.abc < 1 ) )"})
null
astream_log
...
@overload
def astream_log(self, input: Any, config: Optional[RunnableConfig]=None, *,
    diff: Literal[True]=True, with_streamed_output_list: bool=True,
    include_names: Optional[Sequence[str]]=None, include_types: Optional[
    Sequence[str]]=None, include_tags: Optional[Sequence[str]]=None,
    exclude_names: Optional[Sequence[str]]=None, exclude_types: Optional[
    Sequence[str]]=None, exclude_tags: Optional[Sequence[str]]=None, **
    kwargs: Optional[Any]) ->AsyncIterator[RunLogPatch]:
    # Typing-only overload: with diff=True (the default) the stream yields
    # RunLogPatch objects. The runtime implementation lives elsewhere.
    ...
null
exists
return self.redis_client.exists(f'{self.full_key_prefix}:{key}') == 1
def exists(self, key: str) ->bool:
    """Return True if the namespaced key is present in Redis."""
    full_key = f'{self.full_key_prefix}:{key}'
    # Redis EXISTS returns the count of matching keys; 1 means present.
    return self.redis_client.exists(full_key) == 1
null
test_openai_streaming_call
"""Test valid call to openai.""" llm = _get_llm(max_tokens=10, streaming=True) output = llm('Say foo:') assert isinstance(output, str)
@pytest.mark.scheduled
def test_openai_streaming_call() ->None:
    """Test valid call to openai."""
    streaming_llm = _get_llm(max_tokens=10, streaming=True)
    result = streaming_llm('Say foo:')
    assert isinstance(result, str)
Test valid call to openai.
validate_environment
"""Validate environment. Args: values: The values to validate. """ try: from apify_client import ApifyClient values['apify_client'] = ApifyClient() except ImportError: raise ImportError( 'Could not import apify-client Python package. Please install it with `pip install apify-client`.' ) return values
@root_validator()
def validate_environment(cls, values: Dict) ->Dict:
    """Validate environment.

    Args:
        values: The values to validate.

    Returns:
        The validated values, with 'apify_client' set to a fresh client.

    Raises:
        ImportError: if the apify-client package is not installed.
    """
    try:
        from apify_client import ApifyClient
        # Client reads its API token from the environment.
        values['apify_client'] = ApifyClient()
    except ImportError:
        raise ImportError(
            'Could not import apify-client Python package. Please install it with `pip install apify-client`.'
            )
    return values
Validate environment. Args: values: The values to validate.
OutputType
return Union[StringPromptValue, ChatPromptValueConcrete]
@property
def OutputType(self) ->Any:
    """Union of the two concrete prompt-value types this can produce."""
    return Union[StringPromptValue, ChatPromptValueConcrete]
null
_load_agent_from_tools
config_type = config.pop('_type') if config_type not in AGENT_TO_CLASS: raise ValueError(f'Loading {config_type} agent not supported') agent_cls = AGENT_TO_CLASS[config_type] combined_config = {**config, **kwargs} return agent_cls.from_llm_and_tools(llm, tools, **combined_config)
def _load_agent_from_tools(config: dict, llm: BaseLanguageModel, tools:
    List[Tool], **kwargs: Any) ->Union[BaseSingleActionAgent,
    BaseMultiActionAgent]:
    """Instantiate the agent class named by config['_type'] from an LLM and tools."""
    agent_type = config.pop('_type')
    if agent_type not in AGENT_TO_CLASS:
        raise ValueError(f'Loading {agent_type} agent not supported')
    agent_cls = AGENT_TO_CLASS[agent_type]
    # Explicit kwargs override entries carried in the persisted config.
    merged = {**config, **kwargs}
    return agent_cls.from_llm_and_tools(llm, tools, **merged)
null
test_cerebriumai_call
"""Test valid call to cerebriumai.""" llm = CerebriumAI(max_length=10) output = llm('Say foo:') assert isinstance(output, str)
def test_cerebriumai_call() ->None:
    """Test valid call to cerebriumai."""
    model = CerebriumAI(max_length=10)
    result = model('Say foo:')
    assert isinstance(result, str)
Test valid call to cerebriumai.
validate_environment
"""Validate that GPT4All library is installed.""" try: from gpt4all import Embed4All values['client'] = Embed4All() except ImportError: raise ImportError( 'Could not import gpt4all library. Please install the gpt4all library to use this embedding model: pip install gpt4all' ) return values
@root_validator()
def validate_environment(cls, values: Dict) ->Dict:
    """Validate that GPT4All library is installed.

    Args:
        values: The values to validate.

    Returns:
        The validated values, with 'client' set to an Embed4All instance.

    Raises:
        ImportError: if the gpt4all package is not installed.
    """
    try:
        from gpt4all import Embed4All
        # Embed4All downloads/loads the embedding model on construction.
        values['client'] = Embed4All()
    except ImportError:
        raise ImportError(
            'Could not import gpt4all library. Please install the gpt4all library to use this embedding model: pip install gpt4all'
            )
    return values
Validate that GPT4All library is installed.
similarity_search_with_score
"""Run similarity search synchronously and return distance scores Args: query (str): Query k (int): Number of results to return. Defaults to 4. Returns: List of Documents most similar along with relevance distance scores """ async def _similarity_search_with_score() ->List[Tuple[Document, float]]: await self.initialize() return await self.asimilarity_search_with_score(query, k, **kwargs) return asyncio.run(_similarity_search_with_score())
def similarity_search_with_score(self, query: str, k: int=4, **kwargs: Any
    ) ->List[Tuple[Document, float]]:
    """Run similarity search synchronously and return distance scores

    Args:
        query (str): Query
        k (int): Number of results to return. Defaults to 4.

    Returns:
        List of Documents most similar along with relevance distance scores
    """

    async def _run() ->List[Tuple[Document, float]]:
        # Lazy-initialize the store before delegating to the async search.
        await self.initialize()
        return await self.asimilarity_search_with_score(query, k, **kwargs)
    # Bridge the async implementation into this synchronous API.
    return asyncio.run(_run())
Run similarity search synchronously and return distance scores Args: query (str): Query k (int): Number of results to return. Defaults to 4. Returns: List of Documents most similar along with relevance distance scores
stream
return self.transform(iter([input]), config, **kwargs)
def stream(self, input: Dict[str, Any], config: Optional[RunnableConfig]=
    None, **kwargs: Any) ->Iterator[Dict[str, Any]]:
    """Stream output for a single input by delegating to transform()."""
    # Wrap the lone input in a one-element iterator to reuse transform().
    return self.transform(iter([input]), config, **kwargs)
null
batch
return self._batch_with_config(self._batch, inputs, config, return_exceptions=return_exceptions, **kwargs)
def batch(self, inputs: List[Input], config: Optional[Union[RunnableConfig,
    List[RunnableConfig]]]=None, *, return_exceptions: bool=False, **kwargs:
    Any) ->List[Output]:
    """Run _batch over the inputs, letting the helper handle per-call config."""
    return self._batch_with_config(self._batch, inputs, config,
        return_exceptions=return_exceptions, **kwargs)
null
test_mosaicml_embedding_endpoint
"""Test MosaicML embeddings with a different endpoint""" documents = ['foo bar'] embedding = MosaicMLInstructorEmbeddings(endpoint_url= 'https://models.hosted-on.mosaicml.hosting/instructor-xl/v1/predict') output = embedding.embed_documents(documents) assert len(output) == 1 assert len(output[0]) == 768
def test_mosaicml_embedding_endpoint() ->None:
    """Test MosaicML embeddings with a different endpoint"""
    texts = ['foo bar']
    embedder = MosaicMLInstructorEmbeddings(endpoint_url=
        'https://models.hosted-on.mosaicml.hosting/instructor-xl/v1/predict')
    vectors = embedder.embed_documents(texts)
    assert len(vectors) == 1
    # instructor-xl produces 768-dimensional embeddings.
    assert len(vectors[0]) == 768
Test MosaicML embeddings with a different endpoint
container
"""The container we're writing into.""" return self._container
@property
def container(self) ->MutableExpander:
    """The mutable expander container this writer renders into (read-only)."""
    return self._container
The container we're writing into.
max_marginal_relevance_search_by_vector
"""Perform a search and return results that are reordered by MMR. Args: embedding (str): The embedding vector being searched. k (int, optional): How many results to give. Defaults to 4. fetch_k (int, optional): Total results to select k from. Defaults to 20. lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5 param (dict, optional): The search params for the specified index. Defaults to None. expr (str, optional): Filtering expression. Defaults to None. timeout (int, optional): How long to wait before timeout error. Defaults to None. kwargs: Collection.search() keyword arguments. Returns: List[Document]: Document results for search. """ if self.col is None: logger.debug('No existing collection to search.') return [] if param is None: param = self.search_params output_fields = self.fields[:] output_fields.remove(self._vector_field) res = self.col.search(data=[embedding], anns_field=self._vector_field, param=param, limit=fetch_k, expr=expr, output_fields=output_fields, timeout=timeout, **kwargs) ids = [] documents = [] scores = [] for result in res[0]: data = {x: result.entity.get(x) for x in output_fields} doc = self._parse_document(data) documents.append(doc) scores.append(result.score) ids.append(result.id) vectors = self.col.query(expr=f'{self._primary_field} in {ids}', output_fields=[self._primary_field, self._vector_field], timeout=timeout) vectors = {x[self._primary_field]: x[self._vector_field] for x in vectors} ordered_result_embeddings = [vectors[x] for x in ids] new_ordering = maximal_marginal_relevance(np.array(embedding), ordered_result_embeddings, k=k, lambda_mult=lambda_mult) ret = [] for x in new_ordering: if x == -1: break else: ret.append(documents[x]) return ret
def max_marginal_relevance_search_by_vector(self, embedding: list[float],
    k: int=4, fetch_k: int=20, lambda_mult: float=0.5, param: Optional[
    dict]=None, expr: Optional[str]=None, timeout: Optional[int]=None, **
    kwargs: Any) ->List[Document]:
    """Perform a search and return results that are reordered by MMR.

    Args:
        embedding (str): The embedding vector being searched.
        k (int, optional): How many results to give. Defaults to 4.
        fetch_k (int, optional): Total results to select k from.
            Defaults to 20.
        lambda_mult: Number between 0 and 1 that determines the degree
            of diversity among the results with 0 corresponding
            to maximum diversity and 1 to minimum diversity.
            Defaults to 0.5
        param (dict, optional): The search params for the specified index.
            Defaults to None.
        expr (str, optional): Filtering expression. Defaults to None.
        timeout (int, optional): How long to wait before timeout error.
            Defaults to None.
        kwargs: Collection.search() keyword arguments.

    Returns:
        List[Document]: Document results for search.
    """
    if self.col is None:
        logger.debug('No existing collection to search.')
        return []
    if param is None:
        param = self.search_params
    # Return every stored field except the raw vector itself.
    output_fields = self.fields[:]
    output_fields.remove(self._vector_field)
    # Over-fetch fetch_k candidates; MMR below narrows them to k.
    res = self.col.search(data=[embedding], anns_field=self._vector_field,
        param=param, limit=fetch_k, expr=expr, output_fields=output_fields,
        timeout=timeout, **kwargs)
    ids = []
    documents = []
    scores = []  # NOTE(review): collected but never used below — confirm.
    for result in res[0]:
        data = {x: result.entity.get(x) for x in output_fields}
        doc = self._parse_document(data)
        documents.append(doc)
        scores.append(result.score)
        ids.append(result.id)
    # Fetch the candidates' stored vectors so MMR can compare them.
    vectors = self.col.query(expr=f'{self._primary_field} in {ids}',
        output_fields=[self._primary_field, self._vector_field], timeout=
        timeout)
    # Re-key by primary id, then restore the original hit order.
    vectors = {x[self._primary_field]: x[self._vector_field] for x in vectors}
    ordered_result_embeddings = [vectors[x] for x in ids]
    new_ordering = maximal_marginal_relevance(np.array(embedding),
        ordered_result_embeddings, k=k, lambda_mult=lambda_mult)
    ret = []
    for x in new_ordering:
        # -1 marks the end of the MMR selection.
        if x == -1:
            break
        else:
            ret.append(documents[x])
    return ret
Perform a search and return results that are reordered by MMR. Args: embedding (str): The embedding vector being searched. k (int, optional): How many results to give. Defaults to 4. fetch_k (int, optional): Total results to select k from. Defaults to 20. lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5 param (dict, optional): The search params for the specified index. Defaults to None. expr (str, optional): Filtering expression. Defaults to None. timeout (int, optional): How long to wait before timeout error. Defaults to None. kwargs: Collection.search() keyword arguments. Returns: List[Document]: Document results for search.
test_load_success_more
load_max_docs = 10 loader = WikipediaLoader(query='HUNTER X HUNTER', load_max_docs=load_max_docs) docs = loader.load() assert len(docs) == load_max_docs assert_docs(docs, all_meta=False)
def test_load_success_more() ->None:
    """Loading with load_max_docs=10 should return exactly ten documents."""
    expected_count = 10
    docs = WikipediaLoader(query='HUNTER X HUNTER', load_max_docs=
        expected_count).load()
    assert len(docs) == expected_count
    assert_docs(docs, all_meta=False)
null
test_file_toolkit_get_tools_with_selection
"""Test the get_tools method of FileManagementToolkit with selected_tools.""" with TemporaryDirectory() as temp_dir: toolkit = FileManagementToolkit(root_dir=temp_dir, selected_tools=[ 'read_file', 'write_file']) tools = toolkit.get_tools() assert len(tools) == 2 tool_names = [tool.name for tool in tools] assert 'read_file' in tool_names assert 'write_file' in tool_names
def test_file_toolkit_get_tools_with_selection() ->None:
    """Test the get_tools method of FileManagementToolkit with selected_tools."""
    with TemporaryDirectory() as temp_dir:
        selected = ['read_file', 'write_file']
        toolkit = FileManagementToolkit(root_dir=temp_dir, selected_tools=
            selected)
        tools = toolkit.get_tools()
        assert len(tools) == 2
        names = [tool.name for tool in tools]
        assert 'read_file' in names
        assert 'write_file' in names
Test the get_tools method of FileManagementToolkit with selected_tools.
_load_stuff_chain
llm_chain = LLMChain(llm=llm, prompt=prompt, verbose=verbose) return StuffDocumentsChain(llm_chain=llm_chain, document_variable_name= document_variable_name, document_prompt=document_prompt, verbose= verbose, **kwargs)
def _load_stuff_chain(llm: BaseLanguageModel, prompt: BasePromptTemplate=
    stuff_prompt.PROMPT, document_prompt: BasePromptTemplate=stuff_prompt.
    EXAMPLE_PROMPT, document_variable_name: str='summaries', verbose:
    Optional[bool]=None, **kwargs: Any) ->StuffDocumentsChain:
    """Wire an LLMChain into a StuffDocumentsChain over the given prompts."""
    inner_chain = LLMChain(llm=llm, prompt=prompt, verbose=verbose)
    return StuffDocumentsChain(
        llm_chain=inner_chain,
        document_variable_name=document_variable_name,
        document_prompt=document_prompt,
        verbose=verbose,
        **kwargs,
    )
null
key
"""Construct the record key to use""" return self.key_prefix + self.session_id
@property
def key(self) ->str:
    """Construct the record key to use"""
    # Record key = configured prefix immediately followed by the session id.
    prefix, session = self.key_prefix, self.session_id
    return prefix + session
Construct the record key to use
_generate
"""Run the LLM on the given prompt and input.""" try: from clarifai.client.input import Inputs from clarifai.client.model import Model except ImportError: raise ImportError( 'Could not import clarifai python package. Please install it with `pip install clarifai`.' ) if self.pat is not None: pat = self.pat if self.model_url is not None: _model_init = Model(url=self.model_url, pat=pat) else: _model_init = Model(model_id=self.model_id, user_id=self.user_id, app_id=self.app_id, pat=pat) generations = [] batch_size = 32 input_obj = Inputs(pat=pat) try: for i in range(0, len(prompts), batch_size): batch = prompts[i:i + batch_size] input_batch = [input_obj.get_text_input(input_id=str(id), raw_text= inp) for id, inp in enumerate(batch)] (inference_params := {} ) if inference_params is None else inference_params predict_response = _model_init.predict(inputs=input_batch, inference_params=inference_params) for output in predict_response.outputs: if stop is not None: text = enforce_stop_tokens(output.data.text.raw, stop) else: text = output.data.text.raw generations.append([Generation(text=text)]) except Exception as e: logger.error(f'Predict failed, exception: {e}') return LLMResult(generations=generations)
def _generate(self, prompts: List[str], stop: Optional[List[str]]=None,
    run_manager: Optional[CallbackManagerForLLMRun]=None, inference_params:
    Optional[Dict[str, Any]]=None, **kwargs: Any) ->LLMResult:
    """Run the LLM on the given prompt and input.

    Args:
        prompts: Prompts sent to the Clarifai model, 32 at a time.
        stop: Optional stop tokens enforced on each returned text.
        run_manager: Unused; kept for interface parity.
        inference_params: Extra parameters forwarded to Model.predict.

    Returns:
        LLMResult with one Generation per prompt predicted before any
        failure (errors are logged and truncate the results).
    """
    try:
        from clarifai.client.input import Inputs
        from clarifai.client.model import Model
    except ImportError:
        raise ImportError(
            'Could not import clarifai python package. Please install it with `pip install clarifai`.'
            )
    # NOTE(review): if self.pat is None, `pat` below is unbound and the
    # Model(...) call raises NameError — confirm whether pat is mandatory.
    if self.pat is not None:
        pat = self.pat
    if self.model_url is not None:
        _model_init = Model(url=self.model_url, pat=pat)
    else:
        _model_init = Model(model_id=self.model_id, user_id=self.user_id,
            app_id=self.app_id, pat=pat)
    # FIX: replace the obscure walrus-expression-as-statement defaulting
    # inside the loop with an explicit default before the loop.
    if inference_params is None:
        inference_params = {}
    generations = []
    batch_size = 32
    input_obj = Inputs(pat=pat)
    try:
        for start in range(0, len(prompts), batch_size):
            batch = prompts[start:start + batch_size]
            input_batch = [input_obj.get_text_input(input_id=str(idx),
                raw_text=text) for idx, text in enumerate(batch)]
            predict_response = _model_init.predict(inputs=input_batch,
                inference_params=inference_params)
            for output in predict_response.outputs:
                text = output.data.text.raw
                if stop is not None:
                    text = enforce_stop_tokens(text, stop)
                generations.append([Generation(text=text)])
    except Exception as e:
        logger.error(f'Predict failed, exception: {e}')
    return LLMResult(generations=generations)
Run the LLM on the given prompt and input.
_embed
try: from aleph_alpha_client import Prompt, SemanticEmbeddingRequest, SemanticRepresentation except ImportError: raise ValueError( 'Could not import aleph_alpha_client python package. Please install it with `pip install aleph_alpha_client`.' ) query_params = {'prompt': Prompt.from_text(text), 'representation': SemanticRepresentation.Symmetric, 'compress_to_size': self. compress_to_size, 'normalize': self.normalize, 'contextual_control_threshold': self.contextual_control_threshold, 'control_log_additive': self.control_log_additive} query_request = SemanticEmbeddingRequest(**query_params) query_response = self.client.semantic_embed(request=query_request, model= self.model) return query_response.embedding
def _embed(self, text: str) ->List[float]:
    """Request a symmetric semantic embedding for the text from Aleph Alpha."""
    try:
        from aleph_alpha_client import Prompt, SemanticEmbeddingRequest, SemanticRepresentation
    except ImportError:
        raise ValueError(
            'Could not import aleph_alpha_client python package. Please install it with `pip install aleph_alpha_client`.'
            )
    request = SemanticEmbeddingRequest(prompt=Prompt.from_text(text),
        representation=SemanticRepresentation.Symmetric, compress_to_size=
        self.compress_to_size, normalize=self.normalize,
        contextual_control_threshold=self.contextual_control_threshold,
        control_log_additive=self.control_log_additive)
    response = self.client.semantic_embed(request=request, model=self.model)
    return response.embedding
null
lazy_load
try: from newspaper import Article except ImportError as e: raise ImportError( 'Cannot import newspaper, please install with `pip install newspaper3k`' ) from e for url in self.urls: try: article = Article(url, **self.newspaper_kwargs) article.download() article.parse_folder() if self.nlp: article.nlp() except Exception as e: if self.continue_on_failure: logger.error(f'Error fetching or processing {url}, exception: {e}') continue else: raise e metadata = {'title': getattr(article, 'title', ''), 'link': getattr( article, 'url', getattr(article, 'canonical_link', '')), 'authors': getattr(article, 'authors', []), 'language': getattr(article, 'meta_lang', ''), 'description': getattr(article, 'meta_description', ''), 'publish_date': getattr(article, 'publish_date', '')} if self.text_mode: content = article.text else: content = article.html if self.nlp: metadata['keywords'] = getattr(article, 'keywords', []) metadata['summary'] = getattr(article, 'summary', '') yield Document(page_content=content, metadata=metadata)
def lazy_load(self) ->Iterator[Document]:
    """Lazily download and parse each configured URL into a Document.

    Yields:
        Document: page text (or raw HTML when text_mode is False) plus
        article metadata; keywords/summary are added when self.nlp is set.

    Raises:
        ImportError: if newspaper3k is not installed.
        Exception: per-URL failures are re-raised unless
            continue_on_failure is set, in which case they are logged.
    """
    try:
        from newspaper import Article
    except ImportError as e:
        raise ImportError(
            'Cannot import newspaper, please install with `pip install newspaper3k`'
            ) from e
    for url in self.urls:
        try:
            article = Article(url, **self.newspaper_kwargs)
            article.download()
            # FIX: Article has no parse_folder(); parse() is the method
            # that extracts text/metadata from the downloaded HTML.
            article.parse()
            if self.nlp:
                article.nlp()
        except Exception as e:
            if self.continue_on_failure:
                logger.error(
                    f'Error fetching or processing {url}, exception: {e}')
                continue
            else:
                raise e
        metadata = {'title': getattr(article, 'title', ''), 'link': getattr
            (article, 'url', getattr(article, 'canonical_link', '')),
            'authors': getattr(article, 'authors', []), 'language':
            getattr(article, 'meta_lang', ''), 'description': getattr(
            article, 'meta_description', ''), 'publish_date': getattr(
            article, 'publish_date', '')}
        # text_mode selects extracted text; otherwise keep the raw HTML.
        content = article.text if self.text_mode else article.html
        if self.nlp:
            metadata['keywords'] = getattr(article, 'keywords', [])
            metadata['summary'] = getattr(article, 'summary', '')
        yield Document(page_content=content, metadata=metadata)
null
deprecated_property
"""original doc""" return 'This is a deprecated property.'
@property
@deprecated(since='2.0.0', removal='3.0.0')
def deprecated_property(self) ->str:
    """original doc"""
    # @deprecated is applied beneath @property, so the deprecation wraps
    # the getter; the property returns a fixed string.
    return 'This is a deprecated property.'
original doc
_safe_next
try: return next(self.generator) except StopIteration: return None
def _safe_next(self) ->Any: try: return next(self.generator) except StopIteration: return None
null
embed
"""Just duplicate the query m times.""" output = MagicMock() embeddings = [] for i, inp in enumerate(inputs): inp = inp['input'] if 'pizza' in inp: v = [1.0, 0.0, 0.0] elif 'document' in inp: v = [0.0, 0.9, 0.0] else: v = [0.0, 0.0, -1.0] if len(inp) > 10: v[2] += 0.1 output_inner = MagicMock() output_inner.embedding = v embeddings.append(output_inner) output.embeddings = embeddings return output
def embed(self, inputs: List[Dict[str, str]]) ->Any:
    """Just duplicate the query m times."""
    result = MagicMock()
    result.embeddings = []
    for record in inputs:
        text = record['input']
        # Base direction is keyed off keywords in the text.
        if 'pizza' in text:
            vector = [1.0, 0.0, 0.0]
        elif 'document' in text:
            vector = [0.0, 0.9, 0.0]
        else:
            vector = [0.0, 0.0, -1.0]
        # Long inputs get a small bump on the third axis.
        if len(text) > 10:
            vector[2] += 0.1
        item = MagicMock()
        item.embedding = vector
        result.embeddings.append(item)
    return result
Just duplicate the query m times.
setup
self.delete_all_indexes()
@pytest.fixture(autouse=True)
def setup(self) ->None:
    """Reset state before every test by dropping all existing indexes."""
    self.delete_all_indexes()
null
on_llm_end
"""Log the latency, error, token usage, and response to Infino.""" self.end_time = time.time() duration = self.end_time - self.start_time self._send_to_infino('latency', duration) self._send_to_infino('error', self.error) for generations in response.generations: for generation in generations: self._send_to_infino('prompt_response', generation.text, is_ts=False) if response.llm_output is not None and isinstance(response.llm_output, Dict): token_usage = response.llm_output['token_usage'] if token_usage is not None: prompt_tokens = token_usage['prompt_tokens'] total_tokens = token_usage['total_tokens'] completion_tokens = token_usage['completion_tokens'] self._send_to_infino('prompt_tokens', prompt_tokens) self._send_to_infino('total_tokens', total_tokens) self._send_to_infino('completion_tokens', completion_tokens) if self.is_chat_openai_model: messages = ' '.join(generation.message.content for generation in generations) completion_tokens = get_num_tokens(messages, openai_model_name=self. chat_openai_model_name) self._send_to_infino('completion_tokens', completion_tokens)
def on_llm_end(self, response: LLMResult, **kwargs: Any) ->None:
    """Log the latency, error, token usage, and response to Infino."""
    self.end_time = time.time()
    duration = self.end_time - self.start_time
    self._send_to_infino('latency', duration)
    self._send_to_infino('error', self.error)
    # Record every generated completion as a non-time-series entry.
    for generations in response.generations:
        for generation in generations:
            self._send_to_infino('prompt_response', generation.text, is_ts
                =False)
    if response.llm_output is not None and isinstance(response.llm_output,
        Dict):
        token_usage = response.llm_output['token_usage']
        if token_usage is not None:
            prompt_tokens = token_usage['prompt_tokens']
            total_tokens = token_usage['total_tokens']
            completion_tokens = token_usage['completion_tokens']
            self._send_to_infino('prompt_tokens', prompt_tokens)
            self._send_to_infino('total_tokens', total_tokens)
            self._send_to_infino('completion_tokens', completion_tokens)
    if self.is_chat_openai_model:
        # NOTE(review): `generations` here is the loop variable left over
        # from the loop above, so only the LAST generation batch is
        # counted — confirm whether all of response.generations were meant.
        messages = ' '.join(generation.message.content for generation in
            generations)
        completion_tokens = get_num_tokens(messages, openai_model_name=
            self.chat_openai_model_name)
        self._send_to_infino('completion_tokens', completion_tokens)
Log the latency, error, token usage, and response to Infino.
def __init__(self, session_id: str='default', url: str=
    'redis://localhost:6379/0', key_prefix: str='memory_store', ttl:
    Optional[int]=60 * 60 * 24, recall_ttl: Optional[int]=60 * 60 * 24 * 3,
    *args: Any, **kwargs: Any):
    """Set up a Redis-backed message store.

    Args:
        session_id: Identifier for the conversation session.
        url: Redis connection URL.
        key_prefix: Prefix for all keys written by this store.
        ttl: Time-to-live for stored entries, in seconds (default 1 day).
        recall_ttl: TTL applied on recall, in seconds; falls back to ``ttl``
            when not provided (default 3 days).

    Raises:
        ImportError: If the ``redis`` package is not installed.
        redis.exceptions.ConnectionError: If the Redis server is unreachable.
    """
    try:
        import redis
    except ImportError:
        raise ImportError(
            'Could not import redis python package. Please install it with `pip install redis`.'
        )
    super().__init__(*args, **kwargs)
    try:
        self.redis_client = get_client(redis_url=url, decode_responses=True)
    except redis.exceptions.ConnectionError as error:
        logger.error(error)
        # BUG FIX: the error was previously swallowed after logging, leaving
        # `self.redis_client` unset and causing an AttributeError on first use.
        raise
    self.session_id = session_id
    self.key_prefix = key_prefix
    self.ttl = ttl
    # Recall TTL falls back to the write TTL when not given (or falsy).
    self.recall_ttl = recall_ttl or ttl
@pytest.mark.scheduled
def test_openai_call(llm: "AzureOpenAI") -> None:
    """A plain completion call must return a string."""
    result = llm('Say something nice:')
    assert isinstance(result, str)
def _get_google_search(**kwargs: Any) -> "BaseTool":
    """Build a Google Search tool from wrapper keyword arguments."""
    # Construct the API wrapper first, then hand it to the tool.
    wrapper = GoogleSearchAPIWrapper(**kwargs)
    return GoogleSearchRun(api_wrapper=wrapper)
@property
def _default_params(self) -> Dict[str, Any]:
    """Default parameter dict sent with every API call.

    ``model_name`` takes precedence over ``model`` when both are set, and
    entries in ``model_kwargs`` override the base parameters.
    """
    chosen_model = self.model if self.model_name is None else self.model_name
    params: Dict[str, Any] = {
        'model': chosen_model,
        'force_timeout': self.request_timeout,
        'max_tokens': self.max_tokens,
        'stream': self.streaming,
        'n': self.n,
        'temperature': self.temperature,
        'custom_llm_provider': self.custom_llm_provider,
    }
    params.update(self.model_kwargs)
    return params
def test_replicate_call() -> None:
    """Smoke-test a simple non-streaming Replicate completion."""
    model = Replicate(model=TEST_MODEL)
    result = model('What is LangChain')
    assert result
    assert isinstance(result, str)
def test_mset_and_mget(file_store: "LocalFileStore") -> None:
    """Values written with mset must come back unchanged from mget."""
    pairs = [('key1', b'value1'), ('key2', b'value2')]
    file_store.mset(pairs)
    assert file_store.mget(['key1', 'key2']) == [b'value1', b'value2']
def load(self) -> "List[Document]":
    """Load the specified URLs using Selenium and create Document instances.

    Returns:
        List[Document]: A list of Document instances with loaded content.
    """
    from unstructured.partition.html import partition_html
    docs: "List[Document]" = list()
    driver = self._get_driver()
    try:
        for url in self.urls:
            try:
                driver.get(url)
                page_content = driver.page_source
                elements = partition_html(text=page_content)
                text = '\n\n'.join([str(el) for el in elements])
                metadata = self._build_metadata(url, driver)
                docs.append(Document(page_content=text, metadata=metadata))
            except Exception as e:
                if self.continue_on_failure:
                    logger.error(f'Error fetching or processing {url}, exception: {e}')
                else:
                    raise e
    finally:
        # BUG FIX: always quit the driver, even when an error is re-raised
        # above; previously a propagating exception leaked the browser process.
        driver.quit()
    return docs
_check_response
if any(len(d['embedding']) == 1 for d in response['data']) and not skip_empty: import openai raise openai.error.APIError('OpenAI API returned an empty embedding') return response
def _check_response(response: dict, skip_empty: bool=False) ->dict: if any(len(d['embedding']) == 1 for d in response['data'] ) and not skip_empty: import openai raise openai.error.APIError('OpenAI API returned an empty embedding') return response
null
def run(self, image: str) -> str:
    """Run SceneXplain image explainer, falling back to a stock message."""
    result = self._describe_image(image)
    # An empty/falsy description maps to a fixed placeholder string.
    return result if result else 'No description found.'
def test_retrieval_qa_saving_loading(tmp_path: "Path") -> None:
    """Round-trip a RetrievalQA chain through save/load."""
    docs = TextLoader('docs/extras/modules/state_of_the_union.txt').load()
    splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
    chunks = splitter.split_documents(docs)
    store = FAISS.from_documents(chunks, OpenAIEmbeddings())
    chain = RetrievalQA.from_llm(llm=OpenAI(), retriever=store.as_retriever())
    chain.run('What did the president say about Ketanji Brown Jackson?')
    target = tmp_path / 'RetrievalQA_chain.yaml'
    chain.save(file_path=target)
    restored = load_chain(target, retriever=store.as_retriever())
    assert restored == chain
def include_run(self, run: "Run") -> bool:
    """Decide whether *run* passes the configured include/exclude filters."""
    # The root run itself is never forwarded.
    if run.id == self.root_id:
        return False
    tags = run.tags or []
    # With no include filters configured, everything is a candidate;
    # otherwise at least one include filter must match.
    selected = (
        self.include_names is None
        and self.include_types is None
        and self.include_tags is None
    )
    if self.include_names is not None and run.name in self.include_names:
        selected = True
    if self.include_types is not None and run.run_type in self.include_types:
        selected = True
    if self.include_tags is not None and any(t in self.include_tags for t in tags):
        selected = True
    # Exclusion filters veto any match.
    if self.exclude_names is not None and run.name in self.exclude_names:
        selected = False
    if self.exclude_types is not None and run.run_type in self.exclude_types:
        selected = False
    if self.exclude_tags is not None and any(t in self.exclude_tags for t in tags):
        selected = False
    return selected
def _find_matching_id(self, uuid: str) -> Union[str, None]:
    """Reverse lookup: map a result's uuid back to its key in ``_results``."""
    return next(
        (key for key, entry in self._results.items() if entry['uuid'] == uuid),
        None,
    )
def parse_result(self, result: "List[Generation]", *, partial: bool=False) -> Any:
    """Parse generations, then instantiate each tool named in the output."""
    parsed = super().parse_result(result)
    # Map tool class name -> tool class for dispatch.
    by_name = {tool.__name__: tool for tool in self.tools}
    return [by_name[item['type']](**item['args']) for item in parsed]
def visit_AsyncFunctionDef(self, node: ast.AsyncFunctionDef) -> Any:
    """Check the first parameter of an async def for dict-style key access."""
    params = node.args.args
    # Nothing to inspect for a zero-argument coroutine.
    if not params:
        return
    # The first positional parameter is treated as the candidate dict.
    IsLocalDict(params[0].arg, self.keys).visit(node)
def _on_chain_end(self, run: "Run") -> None:
    """Process the Chain Run."""
    # Copy before handing off so the worker thread sees a stable snapshot.
    snapshot = _copy(run)
    self._submit(self._update_run_single, snapshot)
def test_find_all_links_none() -> None:
    """Markup with no anchors yields an empty link list."""
    markup = '<span>Hello world</span>'
    assert find_all_links(markup) == []
def _import_baseten() -> Any:
    """Lazily import and return the Baseten LLM class."""
    # Deferred import keeps langchain_community optional at module load.
    from langchain_community.llms.baseten import Baseten
    return Baseten
@pytest.mark.requires('fitz')
def test_pymupdf_loader() -> None:
    """The PyMuPDF parser should handle the sample PDF."""
    parser = PyMuPDFParser()
    _assert_with_parser(parser)
def query(self, query: str, *, params: Optional[dict]=None) -> List[Dict[str, Any]]:
    """
    This method sends a Cypher query to the connected Neo4j database
    and returns the results as a list of dictionaries.

    Args:
        query (str): The Cypher query to execute.
        params (dict, optional): Dictionary of query parameters. Defaults to {}.

    Returns:
        List[Dict[str, Any]]: List of dictionaries containing the query results.

    Raises:
        ValueError: If the Cypher statement has a syntax error; the driver's
            CypherSyntaxError is attached as the cause.
    """
    from neo4j.exceptions import CypherSyntaxError
    params = params or {}
    with self._driver.session(database=self._database) as session:
        try:
            data = session.run(query, params)
            return [r.data() for r in data]
        except CypherSyntaxError as e:
            # Chain the driver error explicitly (PEP 3134) so callers see
            # the original syntax diagnostics as the direct cause.
            raise ValueError(f'Cypher Statement is not valid\n{e}') from e
def __init__(self, *, features: str='lxml', get_text_separator: str='', **kwargs: Any) -> None:
    """Initialize a bs4 based HTML parser."""
    # Import only to verify the dependency is available; parsing happens later.
    try:
        import bs4
    except ImportError:
        raise ImportError(
            'beautifulsoup4 package not found, please install it with `pip install beautifulsoup4`'
        )
    # Explicit kwargs win over the default `features` value, as before.
    merged = {'features': features}
    merged.update(kwargs)
    self.bs_kwargs = merged
    self.get_text_separator = get_text_separator
def test_api_key_masked_when_passed_via_constructor(capsys: "CaptureFixture") -> None:
    """The API key must print as a masked secret, never in plain text."""
    embedder = EdenAiEmbeddings(edenai_api_key='secret-api-key')
    print(embedder.edenai_api_key, end='')
    assert capsys.readouterr().out == '**********'
def test_unstructured_pdf_loader_default_mode() -> None:
    """Default-mode unstructured loading returns a single document."""
    pdf_path = Path(__file__).parent.parent / 'examples/hello.pdf'
    docs = UnstructuredPDFLoader(str(pdf_path)).load()
    assert len(docs) == 1
def format_messages(self, **kwargs: Any) -> "List[BaseMessage]":
    """Format this template into a single-element message list.

    Args:
        **kwargs: Keyword arguments forwarded to ``format``.

    Returns:
        A list containing exactly one formatted message.
    """
    formatted = self.format(**kwargs)
    return [formatted]
def _create_table_if_not_exists(self) -> None:
    """Create table if it doesn't exist."""
    # One-shot guard: DDL already issued for this instance.
    if self.table_created:
        return
    connection = self.connection_pool.connect()
    try:
        cursor = connection.cursor()
        try:
            ddl = (
                """CREATE TABLE IF NOT EXISTS {} ({} BIGINT PRIMARY KEY AUTO_INCREMENT, {} TEXT NOT NULL, {} JSON NOT NULL);"""
                .format(self.table_name, self.id_field,
                        self.session_id_field, self.message_field)
            )
            cursor.execute(ddl)
            # Mark only after the statement succeeds.
            self.table_created = True
        finally:
            cursor.close()
    finally:
        connection.close()
def load_output_parser(config: dict) -> dict:
    """Load an output parser.

    Args:
        config: config dict

    Returns:
        config dict with output parser loaded
    """
    parser_config = config.get('output_parsers')
    # Absent key or explicit None: nothing to materialize.
    if parser_config is None:
        return config
    parser_type = parser_config['_type']
    if parser_type != 'regex_parser':
        raise ValueError(f'Unsupported output parser {parser_type}')
    config['output_parsers'] = RegexParser(**parser_config)
    return config
def FakeCreate(**args: Any) -> Any:
    """Return a stand-in method that always reports a fixed uuid."""
    def _stub(self: Any, **kwargs: Any) -> str:
        return 'fake_uuid'
    return _stub
def create_file(self, file_query: str) -> str:
    """Create a new file in the GitLab repo.

    Args:
        file_query: First line is the file path; the remainder of the
            string is the file contents.

    Returns:
        A success message, or a notice that the file already exists.
    """
    path = file_query.partition('\n')[0]
    # NOTE(review): the +2 offset skips the newline plus one extra character
    # after the path -- preserved from the original; confirm the intended
    # query format includes that extra character.
    contents = file_query[len(path) + 2:]
    try:
        self.gitlab_repo_instance.files.get(path, self.gitlab_branch)
        return f'File already exists at {path}. Use update_file instead'
    except Exception:
        payload = {
            'branch': self.gitlab_branch,
            'commit_message': 'Create ' + path,
            'file_path': path,
            'content': contents,
        }
        self.gitlab_repo_instance.files.create(payload)
        return 'Created file ' + path
def as_field(self) -> "VectorField":
    """Build the Redis VectorField for this schema entry."""
    from redis.commands.search.field import VectorField
    # Start from the shared field attributes, then layer on the optional
    # block size before constructing the field.
    attrs = super()._fields()
    if self.block_size is not None:
        attrs['BLOCK_SIZE'] = self.block_size
    return VectorField(self.name, self.algorithm, attrs)
def test_all_imports() -> None:
    """`__all__` must stay in sync with the expected export list."""
    exported = set(__all__)
    expected = set(EXPECTED_ALL)
    assert exported == expected
def similarity_search_with_score(self, query: str, k: int=4, **kwargs: Any) -> "List[Tuple[Document, float]]":
    """Search for documents similar to ``query`` and return their scores.

    Args:
        query: The query string to search for.
        k: Number of results to return. Defaults to 4.
        **kwargs: Vector-store specific search parameters; ``top_k`` is
            forwarded to the Momento Vector Index.

    Returns:
        ``(Document, score)`` tuples for the closest matches.
    """
    query_vector = self._embedding.embed_query(query)
    return self.similarity_search_with_score_by_vector(
        embedding=query_vector, k=k, **kwargs
    )