method_name
stringlengths
1
78
method_body
stringlengths
3
9.66k
full_code
stringlengths
31
10.7k
docstring
stringlengths
4
4.74k
test_colored_object_prompt
"""Test colored object prompt.""" llm = OpenAI(temperature=0, max_tokens=512) pal_chain = PALChain.from_colored_object_prompt(llm, timeout=None) question = ( 'On the desk, you see two blue booklets, two purple booklets, and two yellow pairs of sunglasses. If I remove all the pairs of sunglasses from the desk, how m...
def test_colored_object_prompt() ->None: """Test colored object prompt.""" llm = OpenAI(temperature=0, max_tokens=512) pal_chain = PALChain.from_colored_object_prompt(llm, timeout=None) question = ( 'On the desk, you see two blue booklets, two purple booklets, and two yellow pairs of sunglasses....
Test colored object prompt.
__init__
"""Create a new TextSplitter.""" super().__init__(keep_separator=keep_separator, **kwargs) self._separators = separators or ['\n\n', '\n', ' ', ''] self._is_separator_regex = is_separator_regex
def __init__(self, separators: Optional[List[str]]=None, keep_separator: bool=True, is_separator_regex: bool=False, **kwargs: Any) ->None: """Create a new TextSplitter.""" super().__init__(keep_separator=keep_separator, **kwargs) self._separators = separators or ['\n\n', '\n', ' ', ''] self._is_sepa...
Create a new TextSplitter.
test_embedchain_retriever
retriever = EmbedchainRetriever.create() texts = ['This document is about John'] for text in texts: retriever.add_texts(text) docs = retriever.get_relevant_documents('doc about john') assert len(docs) == 1 for doc in docs: assert isinstance(doc, Document) assert doc.page_content assert doc.metadata ...
@pytest.mark.requires('embedchain') @patch.object(Pipeline, 'search', return_value=context_value) @patch.object(Pipeline, 'add', return_value=123) def test_embedchain_retriever(mock_add: Any, mock_search: Any) ->None: retriever = EmbedchainRetriever.create() texts = ['This document is about John'] for text ...
null
__init__
"""Initialize with geopandas Dataframe. Args: data_frame: geopandas DataFrame object. page_content_column: Name of the column containing the page content. Defaults to "geometry". """ try: import geopandas as gpd except ImportError: raise ImportError( ...
def __init__(self, data_frame: Any, page_content_column: str='geometry'): """Initialize with geopandas Dataframe. Args: data_frame: geopandas DataFrame object. page_content_column: Name of the column containing the page content. Defaults to "geometry". """ ...
Initialize with geopandas Dataframe. Args: data_frame: geopandas DataFrame object. page_content_column: Name of the column containing the page content. Defaults to "geometry".
__repr__
"""Text representation for ClickHouse Vector Store, prints backends, username and schemas. Easy to use with `str(ClickHouse())` Returns: repr: string to show connection info and data schema """ _repr = f'\x1b[92m\x1b[1m{self.config.database}.{self.config.table} @ ' _repr += f'{s...
def __repr__(self) ->str: """Text representation for ClickHouse Vector Store, prints backends, username and schemas. Easy to use with `str(ClickHouse())` Returns: repr: string to show connection info and data schema """ _repr = f'\x1b[92m\x1b[1m{self.config.database}.{se...
Text representation for ClickHouse Vector Store, prints backends, username and schemas. Easy to use with `str(ClickHouse())` Returns: repr: string to show connection info and data schema
delete_through_llm
""" A wrapper around `delete` with the LLM being passed. In case the llm(prompt) calls have a `stop` param, you should pass it here """ llm_string = get_prompts({**llm.dict(), **{'stop': stop}}, [])[1] return self.delete(prompt, llm_string=llm_string)
def delete_through_llm(self, prompt: str, llm: LLM, stop: Optional[List[str ]]=None) ->None: """ A wrapper around `delete` with the LLM being passed. In case the llm(prompt) calls have a `stop` param, you should pass it here """ llm_string = get_prompts({**llm.dict(), **{'stop': stop...
A wrapper around `delete` with the LLM being passed. In case the llm(prompt) calls have a `stop` param, you should pass it here
convert_prompt
return self._convert_messages_to_prompt(prompt.to_messages())
def convert_prompt(self, prompt: PromptValue) ->str: return self._convert_messages_to_prompt(prompt.to_messages())
null
__init__
"""Initialize with path.""" self.file_path = path
def __init__(self, path: str): """Initialize with path.""" self.file_path = path
Initialize with path.
parse_date
if date_string is None: return None time_format = '%a %b %d %H:%M:%S %Y %z' return datetime.strptime(date_string, time_format)
def parse_date(date_string: str) ->datetime: if date_string is None: return None time_format = '%a %b %d %H:%M:%S %Y %z' return datetime.strptime(date_string, time_format)
null
_import_azure_cognitive_services_AzureCogsTextAnalyticsHealthTool
from langchain_community.tools.azure_cognitive_services import AzureCogsTextAnalyticsHealthTool return AzureCogsTextAnalyticsHealthTool
def _import_azure_cognitive_services_AzureCogsTextAnalyticsHealthTool() ->Any: from langchain_community.tools.azure_cognitive_services import AzureCogsTextAnalyticsHealthTool return AzureCogsTextAnalyticsHealthTool
null
test_selector_add_example
"""Test LengthBasedExampleSelector can add an example.""" new_example = {'question': """Question: what are you? Answer: bar"""} selector.add_example(new_example) short_question = 'Short question?' output = selector.select_examples({'question': short_question}) assert output == EXAMPLES + [new_example]
def test_selector_add_example(selector: LengthBasedExampleSelector) ->None: """Test LengthBasedExampleSelector can add an example.""" new_example = {'question': 'Question: what are you?\nAnswer: bar'} selector.add_example(new_example) short_question = 'Short question?' output = selector.select_examp...
Test LengthBasedExampleSelector can add an example.
get_name
name = (name or self.name or f"RunnablePick<{','.join([self.keys] if isinstance(self.keys, str) else self.keys)}>" ) return super().get_name(suffix, name=name)
def get_name(self, suffix: Optional[str]=None, *, name: Optional[str]=None ) ->str: name = (name or self.name or f"RunnablePick<{','.join([self.keys] if isinstance(self.keys, str) else self.keys)}>" ) return super().get_name(suffix, name=name)
null
get_lc_namespace
"""Get the namespace of the langchain object.""" return ['langchain', 'schema', 'messages']
@classmethod def get_lc_namespace(cls) ->List[str]: """Get the namespace of the langchain object.""" return ['langchain', 'schema', 'messages']
Get the namespace of the langchain object.
_create_retry_decorator
"""Returns a tenacity retry decorator.""" multiplier = 1 min_seconds = 1 max_seconds = 4 max_retries = 6 return retry(reraise=True, stop=stop_after_attempt(max_retries), wait= wait_exponential(multiplier=multiplier, min=min_seconds, max= max_seconds), before_sleep=before_sleep_log(logger, logging.WARNING))
def _create_retry_decorator() ->Callable[[Any], Any]: """Returns a tenacity retry decorator.""" multiplier = 1 min_seconds = 1 max_seconds = 4 max_retries = 6 return retry(reraise=True, stop=stop_after_attempt(max_retries), wait= wait_exponential(multiplier=multiplier, min=min_seconds, m...
Returns a tenacity retry decorator.
from_texts
"""Construct Typesense wrapper from raw text.""" if typesense_client: vectorstore = cls(typesense_client, embedding, **kwargs) elif typesense_client_params: vectorstore = cls.from_client_params(embedding, ** typesense_client_params, **kwargs) else: raise ValueError( 'Must specify one of type...
@classmethod def from_texts(cls, texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]]=None, ids: Optional[List[str]]=None, typesense_client: Optional[Client]=None, typesense_client_params: Optional[dict]=None, typesense_collection_name: Optional[str]=None, text_key: str='text', **kwa...
Construct Typesense wrapper from raw text.
_get_elements
from unstructured.partition.org import partition_org return partition_org(filename=self.file_path, **self.unstructured_kwargs)
def _get_elements(self) ->List: from unstructured.partition.org import partition_org return partition_org(filename=self.file_path, **self.unstructured_kwargs)
null
test_sequential_usage_multiple_outputs
"""Test sequential usage on multiple output chains.""" chain_1 = FakeChain(input_variables=['foo'], output_variables=['bar', 'test']) chain_2 = FakeChain(input_variables=['bar', 'foo'], output_variables=['baz']) chain = SequentialChain(chains=[chain_1, chain_2], input_variables=['foo']) output = chain({'foo': '123'}) e...
def test_sequential_usage_multiple_outputs() ->None: """Test sequential usage on multiple output chains.""" chain_1 = FakeChain(input_variables=['foo'], output_variables=['bar', 'test']) chain_2 = FakeChain(input_variables=['bar', 'foo'], output_variables=[ 'baz']) chain = SequentialChai...
Test sequential usage on multiple output chains.
_call
"""Call out to Minimax's completion endpoint to chat Args: prompt: The prompt to pass into the model. Returns: The string generated by the model. Example: .. code-block:: python response = minimax("Tell me a joke.") """ request = self._...
def _call(self, prompt: str, stop: Optional[List[str]]=None, run_manager: Optional[CallbackManagerForLLMRun]=None, **kwargs: Any) ->str: """Call out to Minimax's completion endpoint to chat Args: prompt: The prompt to pass into the model. Returns: The string generated by ...
Call out to Minimax's completion endpoint to chat Args: prompt: The prompt to pass into the model. Returns: The string generated by the model. Example: .. code-block:: python response = minimax("Tell me a joke.")
embed_documents
"""Generate embeddings for documents using FastEmbed. Args: texts: The list of texts to embed. Returns: List of embeddings, one for each text. """ embeddings: List[np.ndarray] if self.doc_embed_type == 'passage': embeddings = self._model.passage_embed(texts) else: ...
def embed_documents(self, texts: List[str]) ->List[List[float]]: """Generate embeddings for documents using FastEmbed. Args: texts: The list of texts to embed. Returns: List of embeddings, one for each text. """ embeddings: List[np.ndarray] if self.doc_embed...
Generate embeddings for documents using FastEmbed. Args: texts: The list of texts to embed. Returns: List of embeddings, one for each text.
test_marqo_add_texts
marqo_search = Marqo(client=client, index_name=INDEX_NAME) ids1 = marqo_search.add_texts(['1', '2', '3']) assert len(ids1) == 3 ids2 = marqo_search.add_texts(['1', '2', '3']) assert len(ids2) == 3 assert len(set(ids1).union(set(ids2))) == 6
def test_marqo_add_texts(client: Marqo) ->None: marqo_search = Marqo(client=client, index_name=INDEX_NAME) ids1 = marqo_search.add_texts(['1', '2', '3']) assert len(ids1) == 3 ids2 = marqo_search.add_texts(['1', '2', '3']) assert len(ids2) == 3 assert len(set(ids1).union(set(ids2))) == 6
null
_on_retriever_end
"""Process the Retriever Run."""
def _on_retriever_end(self, run: Run) ->None: """Process the Retriever Run."""
Process the Retriever Run.
set_cluster_id
if v and values['endpoint_name']: raise ValueError('Cannot set both endpoint_name and cluster_id.') elif values['endpoint_name']: return None elif v: return v else: try: if (v := get_repl_context().clusterId): return v raise ValueError("Context doesn't contain clusterId.") ...
@validator('cluster_id', always=True) def set_cluster_id(cls, v: Any, values: Dict[str, Any]) ->Optional[str]: if v and values['endpoint_name']: raise ValueError('Cannot set both endpoint_name and cluster_id.') elif values['endpoint_name']: return None elif v: return v else: ...
null
test_valid_action_and_action_input_parse
llm_output = """I can use the `foo` tool to achieve the goal. Action: foo Action Input: bar""" agent_action: AgentAction = mrkl_output_parser.parse_folder(llm_output) assert agent_action.tool == 'foo' assert agent_action.tool_input == 'bar'
def test_valid_action_and_action_input_parse() ->None: llm_output = """I can use the `foo` tool to achieve the goal. Action: foo Action Input: bar""" agent_action: AgentAction = mrkl_output_parser.parse_folder(llm_output) assert agent_action.tool == 'foo' assert agent_action.tool_input == 'bar'
null
test_no_arguments_to_delete_by_id
with pytest.raises(Exception) as exception_info: self.invoke_delete_by_id_with_no_args(azure_openai_embeddings, collection) assert str(exception_info.value) == 'No document id provided to delete.'
def test_no_arguments_to_delete_by_id(self, azure_openai_embeddings: OpenAIEmbeddings, collection: Any) ->None: with pytest.raises(Exception) as exception_info: self.invoke_delete_by_id_with_no_args(azure_openai_embeddings, collection) assert str(exception_info.value) == 'No document id ...
null
from_chains
"""User friendly way to initialize the MRKL chain. This is intended to be an easy way to get up and running with the MRKL chain. Args: llm: The LLM to use as the agent LLM. chains: The chains the MRKL system has access to. **kwargs: parameters to be passed t...
@classmethod def from_chains(cls, llm: BaseLanguageModel, chains: List[ChainConfig], ** kwargs: Any) ->AgentExecutor: """User friendly way to initialize the MRKL chain. This is intended to be an easy way to get up and running with the MRKL chain. Args: llm: The LLM to use a...
User friendly way to initialize the MRKL chain. This is intended to be an easy way to get up and running with the MRKL chain. Args: llm: The LLM to use as the agent LLM. chains: The chains the MRKL system has access to. **kwargs: parameters to be passed to initialization. Returns: An initialized MRKL...
_get_elements
from unstructured.partition.image import partition_image return partition_image(filename=self.file_path, **self.unstructured_kwargs)
def _get_elements(self) ->List: from unstructured.partition.image import partition_image return partition_image(filename=self.file_path, **self.unstructured_kwargs)
null
add_message
"""Append the message to the record in the local file""" messages = messages_to_dict(self.messages) messages.append(messages_to_dict([message])[0]) self.file_path.write_text(json.dumps(messages))
def add_message(self, message: BaseMessage) ->None: """Append the message to the record in the local file""" messages = messages_to_dict(self.messages) messages.append(messages_to_dict([message])[0]) self.file_path.write_text(json.dumps(messages))
Append the message to the record in the local file
max_marginal_relevance_search_by_vector
""" Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected docs. Examples: >>> data = vector_store.max_marginal_relevance_search_by_vector( ... embedding=<your_embedding>, ...
def max_marginal_relevance_search_by_vector(self, embedding: List[float], k: int=4, fetch_k: int=20, lambda_mult: float=0.5, exec_option: Optional[ str]=None, **kwargs: Any) ->List[Document]: """ Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes ...
Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected docs. Examples: >>> data = vector_store.max_marginal_relevance_search_by_vector( ... embedding=<your_embedding>, ... fetch_k=<elements_to_fetch_before_mmr_s...
embed_query
"""Compute query embeddings using AwaEmbedding. Args: text: The text to embed. Returns: Embeddings for the text. """ return self.client.Embedding(text)
def embed_query(self, text: str) ->List[float]: """Compute query embeddings using AwaEmbedding. Args: text: The text to embed. Returns: Embeddings for the text. """ return self.client.Embedding(text)
Compute query embeddings using AwaEmbedding. Args: text: The text to embed. Returns: Embeddings for the text.
similarity_search_with_score
"""Return LLMRails documents most similar to query, along with scores. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 5 Max 10. alpha: parameter for hybrid search . Returns: List of Documents most similar...
def similarity_search_with_score(self, query: str, k: int=5) ->List[Tuple[ Document, float]]: """Return LLMRails documents most similar to query, along with scores. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 5 Max 10. ...
Return LLMRails documents most similar to query, along with scores. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 5 Max 10. alpha: parameter for hybrid search . Returns: List of Documents most similar to the query and score for each.
validate_environment
"""Validate that api key and python package exists in environment.""" values['openai_api_key'] = get_from_dict_or_env(values, 'openai_api_key', 'OPENAI_API_KEY') values['openai_api_base'] = values['openai_api_base'] or os.getenv( 'OPENAI_API_BASE') values['openai_api_type'] = get_from_dict_or_env(values, 'opena...
@root_validator() def validate_environment(cls, values: Dict) ->Dict: """Validate that api key and python package exists in environment.""" values['openai_api_key'] = get_from_dict_or_env(values, 'openai_api_key', 'OPENAI_API_KEY') values['openai_api_base'] = values['openai_api_base'] or os.getenv( ...
Validate that api key and python package exists in environment.
_identifying_params
return {}
@property def _identifying_params(self) ->Dict[str, Any]: return {}
null
load
return list(self.lazy_load())
def load(self) ->List[Document]: return list(self.lazy_load())
null
mock_create
nonlocal completed completed = True return mock_completion
def mock_create(*args: Any, **kwargs: Any) ->Any: nonlocal completed completed = True return mock_completion
null
__init__
"""Initializes the loader. Args: config: The config to pass to the source connector. stream_name: The name of the stream to load. record_handler: A function that takes in a record and an optional id and returns a Document. If None, the record will be used as ...
def __init__(self, config: Mapping[str, Any], stream_name: str, record_handler: Optional[RecordHandler]=None, state: Optional[Any]=None ) ->None: """Initializes the loader. Args: config: The config to pass to the source connector. stream_name: The name of the stream to load....
Initializes the loader. Args: config: The config to pass to the source connector. stream_name: The name of the stream to load. record_handler: A function that takes in a record and an optional id and returns a Document. If None, the record will be used as the document. Defaults to None. ...
_Set
assert t.elts self.write('{') interleave(lambda : self.write(', '), self.dispatch, t.elts) self.write('}')
def _Set(self, t): assert t.elts self.write('{') interleave(lambda : self.write(', '), self.dispatch, t.elts) self.write('}')
null
extension
return 'bson'
@classmethod def extension(cls) ->str: return 'bson'
null
on_llm_start_common
self.llm_starts += 1 self.starts += 1
def on_llm_start_common(self) ->None: self.llm_starts += 1 self.starts += 1
null
test_pymupdf_loader
"""Test PyMuPDF loader.""" file_path = Path(__file__).parent.parent / 'examples/hello.pdf' loader = PyMuPDFLoader(str(file_path)) docs = loader.load() assert len(docs) == 1 file_path = Path(__file__).parent.parent / 'examples/layout-parser-paper.pdf' loader = PyMuPDFLoader(str(file_path)) docs = loader.load() assert le...
def test_pymupdf_loader() ->None: """Test PyMuPDF loader.""" file_path = Path(__file__).parent.parent / 'examples/hello.pdf' loader = PyMuPDFLoader(str(file_path)) docs = loader.load() assert len(docs) == 1 file_path = Path(__file__ ).parent.parent / 'examples/layout-parser-paper.pdf' ...
Test PyMuPDF loader.
_type
return 'openai-functions-agent'
@property def _type(self) ->str: return 'openai-functions-agent'
null
_get_eval_input
"""Get the evaluation input.""" input_ = {'input': input, 'output': prediction} if self.requires_reference: input_['reference'] = reference return input_
def _get_eval_input(self, prediction: str, reference: Optional[str], input: Optional[str]) ->dict: """Get the evaluation input.""" input_ = {'input': input, 'output': prediction} if self.requires_reference: input_['reference'] = reference return input_
Get the evaluation input.
_import_milvus
from langchain_community.vectorstores.milvus import Milvus return Milvus
def _import_milvus() ->Any: from langchain_community.vectorstores.milvus import Milvus return Milvus
null
delete
"""Delete entity value from store.""" pass
@abstractmethod def delete(self, key: str) ->None: """Delete entity value from store.""" pass
Delete entity value from store.
test_huggingface_endpoint_text_generation
"""Test valid call to HuggingFace text generation model.""" llm = HuggingFaceEndpoint(endpoint_url='', task='text-generation', model_kwargs={'max_new_tokens': 10}) output = llm('Say foo:') print(output) assert isinstance(output, str)
@unittest.skip( 'This test requires an inference endpoint. Tested with Hugging Face endpoints' ) def test_huggingface_endpoint_text_generation() ->None: """Test valid call to HuggingFace text generation model.""" llm = HuggingFaceEndpoint(endpoint_url='', task='text-generation', model_kwargs={'m...
Test valid call to HuggingFace text generation model.
run
"""Query the Brave search engine and return the results as a JSON string. Args: query: The query to search for. Returns: The results as a JSON string. """ web_search_results = self._search_request(query=query) final_results = [{'title': item.get('title'), 'link': item.get('url'), ...
def run(self, query: str) ->str: """Query the Brave search engine and return the results as a JSON string. Args: query: The query to search for. Returns: The results as a JSON string. """ web_search_results = self._search_request(query=query) final_results = [{'title':...
Query the Brave search engine and return the results as a JSON string. Args: query: The query to search for. Returns: The results as a JSON string.
_call
"""Call the Yandex GPT model and return the output. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns: The string generated by the model. Example: .. code-block:: python ...
def _call(self, prompt: str, stop: Optional[List[str]]=None, run_manager: Optional[CallbackManagerForLLMRun]=None, **kwargs: Any) ->str: """Call the Yandex GPT model and return the output. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use ...
Call the Yandex GPT model and return the output. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns: The string generated by the model. Example: .. code-block:: python response = YandexGPT("Tell me a joke.")
validate_environment
"""Validate that api key and python package exists in environment.""" aleph_alpha_api_key = get_from_dict_or_env(values, 'aleph_alpha_api_key', 'ALEPH_ALPHA_API_KEY') try: from aleph_alpha_client import Client values['client'] = Client(token=aleph_alpha_api_key, host=values['host' ], hosting=values[...
@root_validator() def validate_environment(cls, values: Dict) ->Dict: """Validate that api key and python package exists in environment.""" aleph_alpha_api_key = get_from_dict_or_env(values, 'aleph_alpha_api_key', 'ALEPH_ALPHA_API_KEY') try: from aleph_alpha_client import Client valu...
Validate that api key and python package exists in environment.
completion_with_retry
"""Use tenacity to retry the completion call.""" import fireworks.client retry_decorator = _create_retry_decorator(llm, run_manager=run_manager) @conditional_decorator(use_retry, retry_decorator) def _completion_with_retry(**kwargs: Any) ->Any: return fireworks.client.Completion.create(**kwargs) return _completion_...
def completion_with_retry(llm: Fireworks, use_retry: bool, *, run_manager: Optional[CallbackManagerForLLMRun]=None, **kwargs: Any) ->Any: """Use tenacity to retry the completion call.""" import fireworks.client retry_decorator = _create_retry_decorator(llm, run_manager=run_manager) @conditional_dec...
Use tenacity to retry the completion call.
pretty_print_str
return title + '\n' + d
def pretty_print_str(title: str, d: str) ->str: return title + '\n' + d
null
test__convert_dict_to_message_ai
message = {'role': 'assistant', 'content': 'foo'} result = convert_dict_to_message(message) expected_output = AIMessage(content='foo') assert result == expected_output
def test__convert_dict_to_message_ai() ->None: message = {'role': 'assistant', 'content': 'foo'} result = convert_dict_to_message(message) expected_output = AIMessage(content='foo') assert result == expected_output
null
_import_file_management_FileSearchTool
from langchain_community.tools.file_management import FileSearchTool return FileSearchTool
def _import_file_management_FileSearchTool() ->Any: from langchain_community.tools.file_management import FileSearchTool return FileSearchTool
null
__getitem__
return getattr(self, item)
def __getitem__(self, item: str) ->Any: return getattr(self, item)
null
add_texts
if ids is None: ids = [str(uuid.uuid1()) for _ in texts] embeddings = self.embedding_function.embed_documents(list(texts)) if not metadatas: metadatas = [{} for _ in texts] with Session(self._conn) as session: collection = self.get_collection(session) if not collection: raise ValueError('Collect...
def add_texts(self, texts: Iterable[str], metadatas: Optional[List[dict]]= None, ids: Optional[List[str]]=None, **kwargs: Any) ->List[str]: if ids is None: ids = [str(uuid.uuid1()) for _ in texts] embeddings = self.embedding_function.embed_documents(list(texts)) if not metadatas: metadat...
null
_embed_query
if isinstance(self.embedding_function, Embeddings): return self.embedding_function.embed_query(text) else: return self.embedding_function(text)
def _embed_query(self, text: str) ->List[float]: if isinstance(self.embedding_function, Embeddings): return self.embedding_function.embed_query(text) else: return self.embedding_function(text)
null
output_keys
return []
@property def output_keys(self) ->List[str]: return []
null
test_baichuan_key_masked_when_passed_from_env
"""Test initialization with an API key provided via an env variable""" monkeypatch.setenv('BAICHUAN_API_KEY', 'test-api-key') monkeypatch.setenv('BAICHUAN_SECRET_KEY', 'test-secret-key') chat = ChatBaichuan() print(chat.baichuan_api_key, end='') captured = capsys.readouterr() assert captured.out == '**********' print(c...
def test_baichuan_key_masked_when_passed_from_env(monkeypatch: MonkeyPatch, capsys: CaptureFixture) ->None: """Test initialization with an API key provided via an env variable""" monkeypatch.setenv('BAICHUAN_API_KEY', 'test-api-key') monkeypatch.setenv('BAICHUAN_SECRET_KEY', 'test-secret-key') chat ...
Test initialization with an API key provided via an env variable
__init__
"""Initialize a SparkSQL object. Args: spark_session: A SparkSession object. If not provided, one will be created. catalog: The catalog to use. If not provided, the default catalog will be used. schema: The schema to use. If not prov...
def __init__(self, spark_session: Optional[SparkSession]=None, catalog: Optional[str]=None, schema: Optional[str]=None, ignore_tables: Optional [List[str]]=None, include_tables: Optional[List[str]]=None, sample_rows_in_table_info: int=3): """Initialize a SparkSQL object. Args: spark...
Initialize a SparkSQL object. Args: spark_session: A SparkSession object. If not provided, one will be created. catalog: The catalog to use. If not provided, the default catalog will be used. schema: The schema to use. If not provided, the default schema will be used. ignore_tables: A...
completion_with_retry
"""Use tenacity to retry the completion call.""" retry_decorator = _create_retry_decorator(self) @retry_decorator def _completion_with_retry(**_kwargs: Any) ->Any: resp = self.client.call(**_kwargs) return check_response(resp) return _completion_with_retry(**kwargs)
def completion_with_retry(self, **kwargs: Any) ->Any: """Use tenacity to retry the completion call.""" retry_decorator = _create_retry_decorator(self) @retry_decorator def _completion_with_retry(**_kwargs: Any) ->Any: resp = self.client.call(**_kwargs) return check_response(resp) re...
Use tenacity to retry the completion call.
_format_dict_to_string
formatted_str = ', '.join([f'{key}: {value}' for key, value in input_dict. items()]) return formatted_str
@staticmethod def _format_dict_to_string(input_dict: Dict) ->str: formatted_str = ', '.join([f'{key}: {value}' for key, value in input_dict.items()]) return formatted_str
null
stringify_dict
"""Stringify a dictionary. Args: data: The dictionary to stringify. Returns: str: The stringified dictionary. """ text = '' for key, value in data.items(): text += key + ': ' + stringify_value(value) + '\n' return text
def stringify_dict(data: dict) ->str: """Stringify a dictionary. Args: data: The dictionary to stringify. Returns: str: The stringified dictionary. """ text = '' for key, value in data.items(): text += key + ': ' + stringify_value(value) + '\n' return text
Stringify a dictionary. Args: data: The dictionary to stringify. Returns: str: The stringified dictionary.
add_texts
"""Run more texts through the embeddings and add to the vectorstore. Args: texts: Iterable of strings to add to the vectorstore. metadatas: Optional list of metadatas associated with the texts. document_ids: Optional list of document ids associated with the texts. ...
def add_texts(self, texts: Iterable[str], metadatas: Optional[List[Dict[str, Any]]]=None, document_ids: Optional[List[str]]=None, **kwargs: Any) ->List[ str]: """Run more texts through the embeddings and add to the vectorstore. Args: texts: Iterable of strings to add to the vectorstore....
Run more texts through the embeddings and add to the vectorstore. Args: texts: Iterable of strings to add to the vectorstore. metadatas: Optional list of metadatas associated with the texts. document_ids: Optional list of document ids associated with the texts. kwargs: vectorstore specific parameters ...
test_voyage_embedding_query
"""Test voyage embeddings.""" document = 'foo bar' embedding = VoyageEmbeddings(model=MODEL) output = embedding.embed_query(document) assert len(output) == 1024
def test_voyage_embedding_query() ->None: """Test voyage embeddings.""" document = 'foo bar' embedding = VoyageEmbeddings(model=MODEL) output = embedding.embed_query(document) assert len(output) == 1024
Test voyage embeddings.
test_add_texts_not_supported_for_delta_sync_index
index = mock_index(index_details) vectorsearch = default_databricks_vector_search(index) with pytest.raises(ValueError) as ex: vectorsearch.add_texts(fake_texts) assert '`add_texts` is only supported for direct-access index.' in str(ex.value )
@pytest.mark.requires('databricks', 'databricks.vector_search') @pytest.mark.parametrize('index_details', [ DELTA_SYNC_INDEX_MANAGED_EMBEDDINGS, DELTA_SYNC_INDEX_SELF_MANAGED_EMBEDDINGS]) def test_add_texts_not_supported_for_delta_sync_index(index_details: dict ) ->None: index = mock_index(index_details...
null
_load_llm_checker_chain
if 'llm' in config: llm_config = config.pop('llm') llm = load_llm_from_config(llm_config) elif 'llm_path' in config: llm = load_llm(config.pop('llm_path')) else: raise ValueError('One of `llm` or `llm_path` must be present.') if 'create_draft_answer_prompt' in config: create_draft_answer_prompt_conf...
def _load_llm_checker_chain(config: dict, **kwargs: Any) ->LLMCheckerChain: if 'llm' in config: llm_config = config.pop('llm') llm = load_llm_from_config(llm_config) elif 'llm_path' in config: llm = load_llm(config.pop('llm_path')) else: raise ValueError('One of `llm` or `llm...
null
from_prompts
"""Convenience constructor for instantiating from destination prompts.""" destinations = [f"{p['name']}: {p['description']}" for p in prompt_infos] destinations_str = '\n'.join(destinations) router_template = MULTI_PROMPT_ROUTER_TEMPLATE.format(destinations= destinations_str) router_prompt = PromptTemplate(template...
@classmethod def from_prompts(cls, llm: BaseLanguageModel, prompt_infos: List[Dict[str, str]], default_chain: Optional[Chain]=None, **kwargs: Any ) ->MultiPromptChain: """Convenience constructor for instantiating from destination prompts.""" destinations = [f"{p['name']}: {p['description']}" for p in pr...
Convenience constructor for instantiating from destination prompts.
__init__
def __init__(self, path: str, glob: str = '**/[!.]*.pdf', silent_errors: bool = False,
             load_hidden: bool = False, recursive: bool = False,
             extract_images: bool = False):
    """Store the directory-scan configuration on the loader instance.

    Args:
        path: Root directory to scan for PDFs.
        glob: Glob pattern selecting files (default skips dot-files).
        silent_errors: If True, per-file failures are tolerated.
        load_hidden: If True, hidden files are also loaded.
        recursive: If True, descend into subdirectories.
        extract_images: If True, extract images from the PDFs.
    """
    # Plain attribute capture; no I/O happens until loading is requested.
    self.path = path
    self.glob = glob
    self.silent_errors = silent_errors
    self.load_hidden = load_hidden
    self.recursive = recursive
    self.extract_images = extract_images
null
add_documents
"""Run more documents through the embeddings and add to the vectorstore. Args: documents (List[Document]): List of documents to add to the vectorstore. Returns: List of ids of the added documents. """ return self.add_texts([document.page_content for document in document...
def add_documents(self, documents: List[Document], **kwargs: Any) ->List[str]: """Run more documents through the embeddings and add to the vectorstore. Args: documents (List[Document]): List of documents to add to the vectorstore. Returns: List of ids of the added documents...
Run more documents through the embeddings and add to the vectorstore. Args: documents (List[Document]): List of documents to add to the vectorstore. Returns: List of ids of the added documents.
_wait_for_run
def _wait_for_run(self, run_id: str, thread_id: str) -> Any:
    """Poll the Assistants API until the run leaves a pending state.

    Blocks, sleeping ``check_every_ms`` between polls, and returns the
    final run object once its status is no longer in-progress/queued.
    """
    while True:
        run = self.client.beta.threads.runs.retrieve(run_id, thread_id=thread_id)
        if run.status not in ('in_progress', 'queued'):
            return run
        # Convert the configured millisecond interval to seconds.
        sleep(self.check_every_ms / 1000)
null
validate_environment
"""Validate that api key and endpoint exists in environment.""" bing_subscription_key = get_from_dict_or_env(values, 'bing_subscription_key', 'BING_SUBSCRIPTION_KEY') values['bing_subscription_key'] = bing_subscription_key bing_search_url = get_from_dict_or_env(values, 'bing_search_url', 'BING_SEARCH_URL') valu...
@root_validator(pre=True) def validate_environment(cls, values: Dict) ->Dict: """Validate that api key and endpoint exists in environment.""" bing_subscription_key = get_from_dict_or_env(values, 'bing_subscription_key', 'BING_SUBSCRIPTION_KEY') values['bing_subscription_key'] = bing_subscription_key...
Validate that api key and endpoint exists in environment.
_import_edenai_EdenAiParsingInvoiceTool
def _import_edenai_EdenAiParsingInvoiceTool() -> Any:
    """Lazily import and return the EdenAI invoice-parsing tool class."""
    from langchain_community.tools import edenai

    return edenai.EdenAiParsingInvoiceTool
null
_import_lancedb
def _import_lancedb() -> Any:
    """Lazily import and return the LanceDB vector-store class."""
    from langchain_community.vectorstores import lancedb

    return lancedb.LanceDB
null
_import_file_management_ReadFileTool
def _import_file_management_ReadFileTool() -> Any:
    """Lazily import and return the file-management ReadFileTool class."""
    from langchain_community.tools import file_management

    return file_management.ReadFileTool
null
results
"""Run query through GoogleSearch and return metadata. Args: query: The query to search for. num_results: The number of results to return. search_params: Parameters to be passed on search Returns: A list of dictionaries with the following keys: ...
def results(self, query: str, num_results: int, search_params: Optional[ Dict[str, str]]=None) ->List[Dict]: """Run query through GoogleSearch and return metadata. Args: query: The query to search for. num_results: The number of results to return. search_params: Para...
Run query through GoogleSearch and return metadata. Args: query: The query to search for. num_results: The number of results to return. search_params: Parameters to be passed on search Returns: A list of dictionaries with the following keys: snippet - The description of the result. tit...
on_tool_error
if self.__has_valid_config is False: return try: self.__track_event('tool', 'error', run_id=str(run_id), parent_run_id= str(parent_run_id) if parent_run_id else None, error={'message': str(error), 'stack': traceback.format_exc()}, app_id=self.__app_id) except Exception as e: logger.error(f'[...
def on_tool_error(self, error: BaseException, *, run_id: UUID, parent_run_id: Union[UUID, None]=None, **kwargs: Any) ->Any: if self.__has_valid_config is False: return try: self.__track_event('tool', 'error', run_id=str(run_id), parent_run_id=str(parent_run_id) if parent_run_id e...
null
to_pandas
def to_pandas(self) -> 'pd.DataFrame':
    """Materialize the recorded history as a pandas DataFrame."""
    # pandas is imported lazily so it is only required when this is called.
    import pandas as pd

    frame = pd.DataFrame(self.history)
    return frame
null
__str__
def __str__(self) -> str:
    """Render the stored messages as a single readable transcript."""
    transcript = get_buffer_string(self.messages)
    return transcript
null
parse_obj
try: cls._alert_unsupported_spec(obj) return super().parse_obj(obj) except ValidationError as e: new_obj = copy.deepcopy(obj) for error in e.errors(): keys = error['loc'] item = new_obj for key in keys[:-1]: item = item[key] item.pop(keys[-1], None) return...
@classmethod def parse_obj(cls, obj: dict) ->OpenAPISpec: try: cls._alert_unsupported_spec(obj) return super().parse_obj(obj) except ValidationError as e: new_obj = copy.deepcopy(obj) for error in e.errors(): keys = error['loc'] item = new_obj ...
null
embed_with_retry
"""Use tenacity to retry the completion call.""" retry_decorator = _create_retry_decorator() @retry_decorator def _embed_with_retry(*args: Any, **kwargs: Any) ->Any: return embeddings.embed(*args, **kwargs) return _embed_with_retry(*args, **kwargs)
def embed_with_retry(embeddings: MiniMaxEmbeddings, *args: Any, **kwargs: Any ) ->Any: """Use tenacity to retry the completion call.""" retry_decorator = _create_retry_decorator() @retry_decorator def _embed_with_retry(*args: Any, **kwargs: Any) ->Any: return embeddings.embed(*args, **kwarg...
Use tenacity to retry the completion call.
__init__
"""Initializes the loader. Args: config: The config to pass to the source connector. stream_name: The name of the stream to load. record_handler: A function that takes in a record and an optional id and returns a Document. If None, the record will be used as ...
def __init__(self, config: Mapping[str, Any], stream_name: str, record_handler: Optional[RecordHandler]=None, state: Optional[Any]=None ) ->None: """Initializes the loader. Args: config: The config to pass to the source connector. stream_name: The name of the stream to load....
Initializes the loader. Args: config: The config to pass to the source connector. stream_name: The name of the stream to load. record_handler: A function that takes in a record and an optional id and returns a Document. If None, the record will be used as the document. Defaults to None. ...
_call
baseten_api_key = os.environ['BASETEN_API_KEY'] model_id = self.model if self.deployment == 'production': model_url = f'https://model-{model_id}.api.baseten.co/production/predict' elif self.deployment == 'development': model_url = f'https://model-{model_id}.api.baseten.co/development/predict' else: model_ur...
def _call(self, prompt: str, stop: Optional[List[str]]=None, run_manager: Optional[CallbackManagerForLLMRun]=None, **kwargs: Any) ->str: baseten_api_key = os.environ['BASETEN_API_KEY'] model_id = self.model if self.deployment == 'production': model_url = ( f'https://model-{model_id}....
null
on_llm_new_token
"""Run when LLM generates a new token.""" self.metrics['step'] += 1 self.metrics['llm_streams'] += 1 llm_streams = self.metrics['llm_streams'] resp: Dict[str, Any] = {} resp.update({'action': 'on_llm_new_token', 'token': token}) resp.update(self.metrics) self.jsonf(resp, self.temp_dir, f'llm_new_tokens_{llm_streams}')
def on_llm_new_token(self, token: str, **kwargs: Any) ->None: """Run when LLM generates a new token.""" self.metrics['step'] += 1 self.metrics['llm_streams'] += 1 llm_streams = self.metrics['llm_streams'] resp: Dict[str, Any] = {} resp.update({'action': 'on_llm_new_token', 'token': token}) r...
Run when LLM generates a new token.
_skip_reference_warning
"""Warning to show when reference is ignored.""" return f"""Ignoring reference in {self.__class__.__name__}, as it is not expected. To use references, use the labeled_criteria instead."""
@property def _skip_reference_warning(self) ->str: """Warning to show when reference is ignored.""" return f"""Ignoring reference in {self.__class__.__name__}, as it is not expected. To use references, use the labeled_criteria instead."""
Warning to show when reference is ignored.
_convert_message_to_dict
if isinstance(message, ChatMessage): message_dict = {'role': message.role, 'content': message.content} elif isinstance(message, HumanMessage): message_dict = {'role': 'user', 'content': message.content} elif isinstance(message, AIMessage): message_dict = {'role': 'assistant', 'content': message.content} ...
def _convert_message_to_dict(message: BaseMessage) ->dict: if isinstance(message, ChatMessage): message_dict = {'role': message.role, 'content': message.content} elif isinstance(message, HumanMessage): message_dict = {'role': 'user', 'content': message.content} elif isinstance(message, AIMes...
null
get_table_info_no_throw
"""Get information about specified tables. Follows best practices as specified in: Rajkumar et al, 2022 (https://arxiv.org/abs/2204.00498) If `sample_rows_in_table_info`, the specified number of sample rows will be appended to each table description. This can increase performance as ...
def get_table_info_no_throw(self, table_names: Optional[List[str]]=None) ->str: """Get information about specified tables. Follows best practices as specified in: Rajkumar et al, 2022 (https://arxiv.org/abs/2204.00498) If `sample_rows_in_table_info`, the specified number of sample rows wil...
Get information about specified tables. Follows best practices as specified in: Rajkumar et al, 2022 (https://arxiv.org/abs/2204.00498) If `sample_rows_in_table_info`, the specified number of sample rows will be appended to each table description. This can increase performance as demonstrated in the paper.
on_llm_new_token
"""Run on new LLM token. Only available when streaming is enabled.""" llm_run = self._get_run(run_id, run_type='llm') event_kwargs: Dict[str, Any] = {'token': token} if chunk: event_kwargs['chunk'] = chunk llm_run.events.append({'name': 'new_token', 'time': datetime.now(timezone. utc), 'kwargs': event_kwargs}) ...
def on_llm_new_token(self, token: str, *, chunk: Optional[Union[ GenerationChunk, ChatGenerationChunk]]=None, run_id: UUID, parent_run_id: Optional[UUID]=None, **kwargs: Any) ->Run: """Run on new LLM token. Only available when streaming is enabled.""" llm_run = self._get_run(run_id, run_type='llm') ...
Run on new LLM token. Only available when streaming is enabled.
test_load_pupmed_from_universal_entry
pubmed_tool = _load_pubmed_from_universal_entry() search_string = ( 'Examining the Validity of ChatGPT in Identifying Relevant Nephrology Literature' ) output = pubmed_tool(search_string) test_string = ( 'Examining the Validity of ChatGPT in Identifying Relevant Nephrology Literature: Findings and Implicati...
def test_load_pupmed_from_universal_entry() ->None: pubmed_tool = _load_pubmed_from_universal_entry() search_string = ( 'Examining the Validity of ChatGPT in Identifying Relevant Nephrology Literature' ) output = pubmed_tool(search_string) test_string = ( 'Examining the Validity ...
null
_identifying_params
"""Get the identifying parameters.""" return {**{'model_name': self.model_name}, **{'gpu': self.gpu}, **{'lang': self.lang}, **self._default_params}
@property def _identifying_params(self) ->Mapping[str, Any]: """Get the identifying parameters.""" return {**{'model_name': self.model_name}, **{'gpu': self.gpu}, **{ 'lang': self.lang}, **self._default_params}
Get the identifying parameters.
on_llm_start
def on_llm_start(self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any) -> None:
    """Record each incoming prompt, flattened onto a single line."""
    cleaned = [prompt.replace('\n', '') for prompt in prompts]
    self.prompt_records.extend(cleaned)
null
_stream
request = Requests(headers=self._headers()) response = request.post(url=self._url(), data=self._body(prompt, {**kwargs, 'stream': True})) self._handle_status(response.status_code, response.text) for line in _parse_stream(response.iter_lines()): chunk = _handle_sse_line(line) if chunk: yield chunk ...
def _stream(self, prompt: str, stop: Optional[List[str]]=None, run_manager: Optional[CallbackManagerForLLMRun]=None, **kwargs: Any) ->Iterator[ GenerationChunk]: request = Requests(headers=self._headers()) response = request.post(url=self._url(), data=self._body(prompt, {** kwargs, 'stream': Tru...
null
_import_azuresearch
def _import_azuresearch() -> Any:
    """Lazily import and return the AzureSearch vector-store class."""
    from langchain_community.vectorstores import azuresearch

    return azuresearch.AzureSearch
null
format_messages
base_prompt = SystemMessage(content=self.construct_full_prompt(kwargs['goals']) ) time_prompt = SystemMessage(content= f"The current time and date is {time.strftime('%c')}") used_tokens = self.token_counter(cast(str, base_prompt.content) ) + self.token_counter(cast(str, time_prompt.content)) memory: VectorS...
def format_messages(self, **kwargs: Any) ->List[BaseMessage]: base_prompt = SystemMessage(content=self.construct_full_prompt(kwargs[ 'goals'])) time_prompt = SystemMessage(content= f"The current time and date is {time.strftime('%c')}") used_tokens = self.token_counter(cast(str, base_prompt.c...
null
memory_variables
@property
def memory_variables(self) -> List[str]:
    """The single input key this memory exposes to chains."""
    key = self.memory_key
    return [key]
null
encode
def encode(self, to_encode: str) -> str:
    """Prefix the payload with the '[encoded]' marker."""
    return f'[encoded]{to_encode}'
null
return_values
"""Return values of the agent.""" return []
@property def return_values(self) ->List[str]: """Return values of the agent.""" return []
Return values of the agent.
test_generate_stream
"""Test valid call to volc engine.""" llm = VolcEngineMaasLLM(streaming=True) output = llm.stream('tell me a joke') assert isinstance(output, Generator)
def test_generate_stream() ->None: """Test valid call to volc engine.""" llm = VolcEngineMaasLLM(streaming=True) output = llm.stream('tell me a joke') assert isinstance(output, Generator)
Test valid call to volc engine.
delete_document_by_id
"""Removes a Specific Document by Id Args: document_id: The document identifier """ try: from bson.objectid import ObjectId except ImportError as e: raise ImportError( 'Unable to import bson, please install with `pip install bson`.' ) from e if document_id is None: ...
def delete_document_by_id(self, document_id: Optional[str]=None) ->None: """Removes a Specific Document by Id Args: document_id: The document identifier """ try: from bson.objectid import ObjectId except ImportError as e: raise ImportError( 'Unable to...
Removes a Specific Document by Id Args: document_id: The document identifier
init_hnsw
from docarray import BaseDoc from docarray.index import HnswDocumentIndex class MyDoc(BaseDoc): title: str title_embedding: NdArray[32] other_emb: NdArray[32] year: int embeddings = FakeEmbeddings(size=32) hnsw_db = HnswDocumentIndex[MyDoc](work_dir=tmp_path) hnsw_db.index([MyDoc(title=f'My document {i}...
@pytest.fixture def init_hnsw(tmp_path: Path) ->Tuple[HnswDocumentIndex, Dict[str, Any], FakeEmbeddings]: from docarray import BaseDoc from docarray.index import HnswDocumentIndex class MyDoc(BaseDoc): title: str title_embedding: NdArray[32] other_emb: NdArray[32] year:...
null
refresh_schema
""" Refreshes the Neo4j graph schema information. """ node_properties = [el['output'] for el in self.query(node_properties_query)] rel_properties = [el['output'] for el in self.query(rel_properties_query)] relationships = [el['output'] for el in self.query(rel_query)] self.structured_schema = {'node_pro...
def refresh_schema(self) ->None: """ Refreshes the Neo4j graph schema information. """ node_properties = [el['output'] for el in self.query(node_properties_query) ] rel_properties = [el['output'] for el in self.query(rel_properties_query)] relationships = [el['output'] for el in ...
Refreshes the Neo4j graph schema information.
list
def list(self, item: Any) -> list:
    """Coerce *item* to a list, mapping None to an empty list."""
    return [] if item is None else [*item]
null
lazy_load
"""Load bibtex file using bibtexparser and get the article texts plus the article metadata. See https://bibtexparser.readthedocs.io/en/master/ Returns: a list of documents with the document.page_content in text format """ try: import fitz except ImportError: raise Im...
def lazy_load(self) ->Iterator[Document]: """Load bibtex file using bibtexparser and get the article texts plus the article metadata. See https://bibtexparser.readthedocs.io/en/master/ Returns: a list of documents with the document.page_content in text format """ try...
Load bibtex file using bibtexparser and get the article texts plus the article metadata. See https://bibtexparser.readthedocs.io/en/master/ Returns: a list of documents with the document.page_content in text format