method_name
stringlengths
1
78
method_body
stringlengths
3
9.66k
full_code
stringlengths
31
10.7k
docstring
stringlengths
4
4.74k
test_cloudflare_workers_ai_embedding_query
"""Test Cloudflare Workers AI embeddings.""" responses.add(responses.POST, 'https://api.cloudflare.com/client/v4/accounts/123/ai/run/@cf/baai/bge-base-en-v1.5' , json={'result': {'shape': [1, 768], 'data': [[0.0] * 768]}, 'success': 'true', 'errors': [], 'messages': []}) document = 'foo bar' embeddings = Cl...
@responses.activate def test_cloudflare_workers_ai_embedding_query() ->None: """Test Cloudflare Workers AI embeddings.""" responses.add(responses.POST, 'https://api.cloudflare.com/client/v4/accounts/123/ai/run/@cf/baai/bge-base-en-v1.5' , json={'result': {'shape': [1, 768], 'data': [[0.0] * 768]...
Test Cloudflare Workers AI embeddings.
test_faiss_with_metadatas_and_filter
texts = ['foo', 'bar', 'baz'] metadatas = [{'page': i} for i in range(len(texts))] docsearch = FAISS.from_texts(texts, FakeEmbeddings(), metadatas=metadatas) expected_docstore = InMemoryDocstore({docsearch.index_to_docstore_id[0]: Document(page_content='foo', metadata={'page': 0}), docsearch. index_to_docstore_...
@pytest.mark.requires('faiss') def test_faiss_with_metadatas_and_filter() ->None: texts = ['foo', 'bar', 'baz'] metadatas = [{'page': i} for i in range(len(texts))] docsearch = FAISS.from_texts(texts, FakeEmbeddings(), metadatas=metadatas) expected_docstore = InMemoryDocstore({docsearch.index_to_docstor...
null
right_input_mapper
return {'question': d['some_input']}
def right_input_mapper(d: dict) ->dict: return {'question': d['some_input']}
null
_run
"""Use the tool.""" if self.sync_browser is None: raise ValueError(f'Synchronous browser not provided to {self.name}') page = get_current_page(self.sync_browser) response = page.go_back() if response: return ( f"Navigated back to the previous page with URL '{response.url}'. Status code {response.status}...
def _run(self, run_manager: Optional[CallbackManagerForToolRun]=None) ->str: """Use the tool.""" if self.sync_browser is None: raise ValueError(f'Synchronous browser not provided to {self.name}') page = get_current_page(self.sync_browser) response = page.go_back() if response: return...
Use the tool.
get_num_tokens
"""Return number of tokens in text.""" return len(text.split())
def get_num_tokens(self, text: str) ->int: """Return number of tokens in text.""" return len(text.split())
Return number of tokens in text.
_get_stackexchange
return StackExchangeTool(api_wrapper=StackExchangeAPIWrapper(**kwargs))
def _get_stackexchange(**kwargs: Any) ->BaseTool: return StackExchangeTool(api_wrapper=StackExchangeAPIWrapper(**kwargs))
null
_convert_text_to_prompt
if isinstance(prompt, StringPromptValue): return StringPromptValue(text=text) elif isinstance(prompt, str): return text elif isinstance(prompt, ChatPromptValue): messages = list(prompt.messages) message = messages[self.chat_message_index] if isinstance(message, HumanMessage): messages[self.c...
def _convert_text_to_prompt(self, prompt: Any, text: str) ->Any: if isinstance(prompt, StringPromptValue): return StringPromptValue(text=text) elif isinstance(prompt, str): return text elif isinstance(prompt, ChatPromptValue): messages = list(prompt.messages) message = messag...
null
_get_golden_query
return GoldenQueryRun(api_wrapper=GoldenQueryAPIWrapper(**kwargs))
def _get_golden_query(**kwargs: Any) ->BaseTool: return GoldenQueryRun(api_wrapper=GoldenQueryAPIWrapper(**kwargs))
null
raise_deprecation
"""Raise deprecation warning if callback_manager is used.""" if values.get('callback_manager') is not None: warnings.warn( 'callback_manager is deprecated. Please use callbacks instead.', DeprecationWarning) values['callbacks'] = values.pop('callback_manager', None) return values
@root_validator() def raise_deprecation(cls, values: Dict) ->Dict: """Raise deprecation warning if callback_manager is used.""" if values.get('callback_manager') is not None: warnings.warn( 'callback_manager is deprecated. Please use callbacks instead.', DeprecationWarning) ...
Raise deprecation warning if callback_manager is used.
_run
"""Execute the query, return the results or an error message.""" return self.db.run_no_throw(query)
def _run(self, query: str, run_manager: Optional[CallbackManagerForToolRun] =None) ->str: """Execute the query, return the results or an error message.""" return self.db.run_no_throw(query)
Execute the query, return the results or an error message.
embeddings
return self._embedding_function
@property def embeddings(self) ->Optional[Embeddings]: return self._embedding_function
null
test_anonymize_multiple
"""Test anonymizing multiple items in a sentence""" from langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer text = ( "John Smith's phone number is 313-666-7440 and email is johnsmith@gmail.com" ) anonymizer = PresidioReversibleAnonymizer() anonymized_text = anonymizer.anonymize(text) for...
@pytest.mark.requires('presidio_analyzer', 'presidio_anonymizer', 'faker') def test_anonymize_multiple() ->None: """Test anonymizing multiple items in a sentence""" from langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer text = ( "John Smith's phone number is 313-666-7440 and...
Test anonymizing multiple items in a sentence
_check_response
"""Check the response from the DataForSEO SERP API for errors.""" if response.get('status_code') != 20000: raise ValueError( f"Got error from DataForSEO SERP API: {response.get('status_message')}" ) return response
def _check_response(self, response: dict) ->dict: """Check the response from the DataForSEO SERP API for errors.""" if response.get('status_code') != 20000: raise ValueError( f"Got error from DataForSEO SERP API: {response.get('status_message')}" ) return response
Check the response from the DataForSEO SERP API for errors.
_strip_erroneous_leading_spaces
"""Strip erroneous leading spaces from text. The PaLM API will sometimes erroneously return a single leading space in all lines > 1. This function strips that space. """ has_leading_space = all(not line or line[0] == ' ' for line in text.split( '\n')[1:]) if has_leading_space: return text.replace('...
def _strip_erroneous_leading_spaces(text: str) ->str: """Strip erroneous leading spaces from text. The PaLM API will sometimes erroneously return a single leading space in all lines > 1. This function strips that space. """ has_leading_space = all(not line or line[0] == ' ' for line in text. ...
Strip erroneous leading spaces from text. The PaLM API will sometimes erroneously return a single leading space in all lines > 1. This function strips that space.
similarity_search_by_vector
"""Return docs most similar to embedding vector. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. Returns: List of Docume...
def similarity_search_by_vector(self, embedding: List[float], k: int=4, filter: Optional[dict]=None, **kwargs: Any) ->List[Document]: """Return docs most similar to embedding vector. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. De...
Return docs most similar to embedding vector. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. Returns: List of Documents most similar to the query vector.
test_banana_call
"""Test valid call to BananaDev.""" llm = Banana() output = llm('Say foo:') assert isinstance(output, str)
def test_banana_call() ->None: """Test valid call to BananaDev.""" llm = Banana() output = llm('Say foo:') assert isinstance(output, str)
Test valid call to BananaDev.
test_valid_final_answer_parse
llm_output = 'Final Answer: The best pizza to eat is margaritta ' agent_finish: AgentFinish = mrkl_output_parser.parse_folder(llm_output) assert agent_finish.return_values.get('output' ) == 'The best pizza to eat is margaritta'
def test_valid_final_answer_parse() ->None: llm_output = 'Final Answer: The best pizza to eat is margaritta ' agent_finish: AgentFinish = mrkl_output_parser.parse_folder(llm_output) assert agent_finish.return_values.get('output' ) == 'The best pizza to eat is margaritta'
null
_huggingface_tokenizer_length
return len(tokenizer.encode(text))
def _huggingface_tokenizer_length(text: str) ->int: return len(tokenizer.encode(text))
null
test_search_filter
"""Test end to end construction and search with metadata filtering.""" texts = ['hello bagel', 'hello langchain'] metadatas = [{'first_letter': text[0]} for text in texts] txt_search = Bagel.from_texts(cluster_name='testing', texts=texts, metadatas=metadatas) output = txt_search.similarity_search('bagel', k=1, wher...
def test_search_filter() ->None: """Test end to end construction and search with metadata filtering.""" texts = ['hello bagel', 'hello langchain'] metadatas = [{'first_letter': text[0]} for text in texts] txt_search = Bagel.from_texts(cluster_name='testing', texts=texts, metadatas=metadatas) ...
Test end to end construction and search with metadata filtering.
_get_user_id
if user_ctx.get() is not None: return user_ctx.get() metadata = metadata or {} user_id = metadata.get('user_id') if user_id is None: user_id = metadata.get('userId') return user_id
def _get_user_id(metadata: Any) ->Any: if user_ctx.get() is not None: return user_ctx.get() metadata = metadata or {} user_id = metadata.get('user_id') if user_id is None: user_id = metadata.get('userId') return user_id
null
_format_results
doc_strings = ['\n'.join([doc.metadata['title'], doc.metadata['description' ]]) for doc in docs if query in doc.metadata['description'] or query in doc.metadata['title']] return '\n\n'.join(doc_strings)
@staticmethod def _format_results(docs: Iterable[Document], query: str) ->str: doc_strings = ['\n'.join([doc.metadata['title'], doc.metadata[ 'description']]) for doc in docs if query in doc.metadata[ 'description'] or query in doc.metadata['title']] return '\n\n'.join(doc_strings)
null
_with_tqdm
"""Wrap an iterable in a tqdm progress bar.""" return tqdm(iterable, total=length_func())
def _with_tqdm(iterable: Iterable[T]) ->Iterator[T]: """Wrap an iterable in a tqdm progress bar.""" return tqdm(iterable, total=length_func())
Wrap an iterable in a tqdm progress bar.
_import_python_tool_PythonREPLTool
raise ImportError( "This tool has been moved to langchain experiment. This tool has access to a python REPL. For best practices make sure to sandbox this tool. Read https://github.com/langchain-ai/langchain/blob/master/SECURITY.md To keep using this code as is, install langchain experimental and update relevant imp...
def _import_python_tool_PythonREPLTool() ->Any: raise ImportError( "This tool has been moved to langchain experiment. This tool has access to a python REPL. For best practices make sure to sandbox this tool. Read https://github.com/langchain-ai/langchain/blob/master/SECURITY.md To keep using this code as is...
null
_create_template_from_message_type
"""Create a message prompt template from a message type and template string. Args: message_type: str the type of the message template (e.g., "human", "ai", etc.) template: str the template string. Returns: a message prompt template of the appropriate type. """ if message_type in ('...
def _create_template_from_message_type(message_type: str, template: str ) ->BaseMessagePromptTemplate: """Create a message prompt template from a message type and template string. Args: message_type: str the type of the message template (e.g., "human", "ai", etc.) template: str the template...
Create a message prompt template from a message type and template string. Args: message_type: str the type of the message template (e.g., "human", "ai", etc.) template: str the template string. Returns: a message prompt template of the appropriate type.
test_int_metadata
"""Verify int metadata is loaded correctly""" doc = next(doc for doc in docs if doc.metadata['source'] == 'tags_and_frontmatter.md') assert doc.metadata['anInt'] == 15
def test_int_metadata() ->None: """Verify int metadata is loaded correctly""" doc = next(doc for doc in docs if doc.metadata['source'] == 'tags_and_frontmatter.md') assert doc.metadata['anInt'] == 15
Verify int metadata is loaded correctly
test_finish
"""Test standard parsing of agent finish.""" parser = ReActJsonSingleInputOutputParser() _input = """Thought: agent thought here Final Answer: The temperature is 100""" output = parser.invoke(_input) expected_output = AgentFinish(return_values={'output': 'The temperature is 100'}, log=_input) assert output == expec...
def test_finish() ->None: """Test standard parsing of agent finish.""" parser = ReActJsonSingleInputOutputParser() _input = ( 'Thought: agent thought here\nFinal Answer: The temperature is 100') output = parser.invoke(_input) expected_output = AgentFinish(return_values={'output': 'Th...
Test standard parsing of agent finish.
_import_graphql_tool
from langchain_community.tools.graphql.tool import BaseGraphQLTool return BaseGraphQLTool
def _import_graphql_tool() ->Any: from langchain_community.tools.graphql.tool import BaseGraphQLTool return BaseGraphQLTool
null
get_openai_token_cost_for_model
""" Get the cost in USD for a given model and number of tokens. Args: model_name: Name of the model num_tokens: Number of tokens. is_completion: Whether the model is used for completion or not. Defaults to False. Returns: Cost in USD. """ model_name = standa...
def get_openai_token_cost_for_model(model_name: str, num_tokens: int, is_completion: bool=False) ->float: """ Get the cost in USD for a given model and number of tokens. Args: model_name: Name of the model num_tokens: Number of tokens. is_completion: Whether the model is used fo...
Get the cost in USD for a given model and number of tokens. Args: model_name: Name of the model num_tokens: Number of tokens. is_completion: Whether the model is used for completion or not. Defaults to False. Returns: Cost in USD.
resize_base64_image
""" Resize an image encoded as a Base64 string. :param base64_string: A Base64 encoded string of the image to be resized. :param size: A tuple representing the new size (width, height) for the image. :return: A Base64 encoded string of the resized image. """ img_data = base64.b64decode(base64_strin...
def resize_base64_image(base64_string, size=(128, 128)): """ Resize an image encoded as a Base64 string. :param base64_string: A Base64 encoded string of the image to be resized. :param size: A tuple representing the new size (width, height) for the image. :return: A Base64 encoded string of the re...
Resize an image encoded as a Base64 string. :param base64_string: A Base64 encoded string of the image to be resized. :param size: A tuple representing the new size (width, height) for the image. :return: A Base64 encoded string of the resized image.
append_to_last_tokens
self.last_tokens.append(token) self.last_tokens_stripped.append(token.strip()) if len(self.last_tokens) > len(self.answer_prefix_tokens): self.last_tokens.pop(0) self.last_tokens_stripped.pop(0)
def append_to_last_tokens(self, token: str) ->None: self.last_tokens.append(token) self.last_tokens_stripped.append(token.strip()) if len(self.last_tokens) > len(self.answer_prefix_tokens): self.last_tokens.pop(0) self.last_tokens_stripped.pop(0)
null
find_files
"""Find all MDX files in the given path""" if os.path.isfile(path): yield path return for root, _, files in os.walk(path): for file in files: if file.endswith('.mdx') or file.endswith('.md'): yield os.path.join(root, file)
def find_files(path): """Find all MDX files in the given path""" if os.path.isfile(path): yield path return for root, _, files in os.walk(path): for file in files: if file.endswith('.mdx') or file.endswith('.md'): yield os.path.join(root, file)
Find all MDX files in the given path
test_chat_ernie_bot_with_temperature
chat = ErnieBotChat(model_name='ERNIE-Bot', temperature=1.0) message = HumanMessage(content='Hello') response = chat([message]) assert isinstance(response, AIMessage) assert isinstance(response.content, str)
def test_chat_ernie_bot_with_temperature() ->None: chat = ErnieBotChat(model_name='ERNIE-Bot', temperature=1.0) message = HumanMessage(content='Hello') response = chat([message]) assert isinstance(response, AIMessage) assert isinstance(response.content, str)
null
from_texts
"""Create a Milvus collection, indexes it with HNSW, and insert data. Args: texts (List[str]): Text data. embedding (Embeddings): Embedding function. metadatas (Optional[List[dict]]): Metadata for each text if it exists. Defaults to None. collecti...
@classmethod def from_texts(cls, texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]]=None, collection_name: str='LangChainCollection', connection_args: dict[str, Any]=DEFAULT_MILVUS_CONNECTION, consistency_level: str='Session', index_params: Optional[dict]=None, search_params: Optio...
Create a Milvus collection, indexes it with HNSW, and insert data. Args: texts (List[str]): Text data. embedding (Embeddings): Embedding function. metadatas (Optional[List[dict]]): Metadata for each text if it exists. Defaults to None. collection_name (str, optional): Collection name to use. De...
fakepost
def fn(url: str, **kwargs: Any) ->Any: if url.endswith('/processing/upload'): return FakeUploadResponse() elif url.endswith('/processing/push'): return FakePushResponse() else: raise Exception('Invalid POST URL') return fn
def fakepost(**kwargs: Any) ->Any: def fn(url: str, **kwargs: Any) ->Any: if url.endswith('/processing/upload'): return FakeUploadResponse() elif url.endswith('/processing/push'): return FakePushResponse() else: raise Exception('Invalid POST URL') ret...
null
_get_relevant_documents
"""Get documents relevant for a query.""" from google.api_core.exceptions import InvalidArgument search_request = self._create_search_request(query) try: response = self._client.search(search_request) except InvalidArgument as exc: raise type(exc)(exc.message + ' This might be due to engine_data_type no...
def _get_relevant_documents(self, query: str, *, run_manager: CallbackManagerForRetrieverRun) ->List[Document]: """Get documents relevant for a query.""" from google.api_core.exceptions import InvalidArgument search_request = self._create_search_request(query) try: response = self._client.se...
Get documents relevant for a query.
output_keys
"""Return the output keys. :meta private: """ _output_keys = [self.output_key] return _output_keys
@property def output_keys(self) ->List[str]: """Return the output keys. :meta private: """ _output_keys = [self.output_key] return _output_keys
Return the output keys. :meta private:
_download_from_gcs
"""Downloads from GCS in text format. Args: gcs_location: The location where the file is located. Returns: The string contents of the file. """ bucket = self.gcs_client.get_bucket(self.gcs_bucket_name) blob = bucket.blob(gcs_location) return blob.download_as_string()
def _download_from_gcs(self, gcs_location: str) ->str: """Downloads from GCS in text format. Args: gcs_location: The location where the file is located. Returns: The string contents of the file. """ bucket = self.gcs_client.get_bucket(self.gcs_bucket_name) b...
Downloads from GCS in text format. Args: gcs_location: The location where the file is located. Returns: The string contents of the file.
test_placeholder
"""Used for compiling integration tests without running any real tests.""" pass
@pytest.mark.compile def test_placeholder() ->None: """Used for compiling integration tests without running any real tests.""" pass
Used for compiling integration tests without running any real tests.
test_memory_with_message_store
"""Test the memory with a message store.""" message_history = Neo4jChatMessageHistory(session_id='test-session') memory = ConversationBufferMemory(memory_key='baz', chat_memory= message_history, return_messages=True) memory.chat_memory.add_ai_message('This is me, the AI') memory.chat_memory.add_user_message('This i...
def test_memory_with_message_store() ->None: """Test the memory with a message store.""" message_history = Neo4jChatMessageHistory(session_id='test-session') memory = ConversationBufferMemory(memory_key='baz', chat_memory= message_history, return_messages=True) memory.chat_memory.add_ai_message(...
Test the memory with a message store.
_import_arcee
from langchain_community.llms.arcee import Arcee return Arcee
def _import_arcee() ->Any: from langchain_community.llms.arcee import Arcee return Arcee
null
test_pairwise_string_comparison_chain
llm = FakeLLM(queries={'a': 'This is a rather good answer. Rating: [[9]]', 'b': 'This is a rather bad answer. Rating: [[1]]'}, sequential_responses=True) chain = ScoreStringEvalChain.from_llm(llm=llm) res = chain.evaluate_strings(prediction='I like pie.', input= 'What is your favorite food?') assert res['sc...
def test_pairwise_string_comparison_chain() ->None: llm = FakeLLM(queries={'a': 'This is a rather good answer. Rating: [[9]]', 'b': 'This is a rather bad answer. Rating: [[1]]'}, sequential_responses =True) chain = ScoreStringEvalChain.from_llm(llm=llm) res = chain.evaluate_strings(p...
null
_import_timescalevector
from langchain_community.vectorstores.timescalevector import TimescaleVector return TimescaleVector
def _import_timescalevector() ->Any: from langchain_community.vectorstores.timescalevector import TimescaleVector return TimescaleVector
null
_stream
invocation_params = self._invocation_params(stop, **kwargs) for res in self.client.generate_stream(prompt, **invocation_params): stop_seq_found: Optional[str] = None for stop_seq in invocation_params['stop_sequences']: if stop_seq in res.token.text: stop_seq_found = stop_seq text: Option...
def _stream(self, prompt: str, stop: Optional[List[str]]=None, run_manager: Optional[CallbackManagerForLLMRun]=None, **kwargs: Any) ->Iterator[ GenerationChunk]: invocation_params = self._invocation_params(stop, **kwargs) for res in self.client.generate_stream(prompt, **invocation_params): stop_...
null
input_keys
"""Get the input keys for the chain. Returns: List[str]: The input keys. """ return ['question', 'agent_trajectory', 'answer', 'reference']
@property def input_keys(self) ->List[str]: """Get the input keys for the chain. Returns: List[str]: The input keys. """ return ['question', 'agent_trajectory', 'answer', 'reference']
Get the input keys for the chain. Returns: List[str]: The input keys.
mdelete
"""Delete the given keys and their associated values. Args: keys (Sequence[str]): A sequence of keys to delete. Returns: None """ for key in keys: full_path = self._get_full_path(key) if full_path.exists(): full_path.unlink()
def mdelete(self, keys: Sequence[str]) ->None: """Delete the given keys and their associated values. Args: keys (Sequence[str]): A sequence of keys to delete. Returns: None """ for key in keys: full_path = self._get_full_path(key) if full_path.ex...
Delete the given keys and their associated values. Args: keys (Sequence[str]): A sequence of keys to delete. Returns: None
test_file_toolkit_root_dir
"""Test the FileManagementToolkit root_dir handling.""" with TemporaryDirectory() as temp_dir: toolkit = FileManagementToolkit(root_dir=temp_dir) tools = toolkit.get_tools() root_dirs = [tool.root_dir for tool in tools if hasattr(tool, 'root_dir')] assert all(root_dir == temp_dir for root_dir in root_di...
def test_file_toolkit_root_dir() ->None: """Test the FileManagementToolkit root_dir handling.""" with TemporaryDirectory() as temp_dir: toolkit = FileManagementToolkit(root_dir=temp_dir) tools = toolkit.get_tools() root_dirs = [tool.root_dir for tool in tools if hasattr(tool, ...
Test the FileManagementToolkit root_dir handling.
_identifying_params
"""Get the identifying parameters.""" return {**{'model_name': self.model_name}, **super()._identifying_params}
@property def _identifying_params(self) ->Mapping[str, Any]: """Get the identifying parameters.""" return {**{'model_name': self.model_name}, **super()._identifying_params}
Get the identifying parameters.
format_duplicated_operator
"""Format the operator name with the count""" clean_operator_name = re.sub('[<>]', '', operator_name) clean_operator_name = re.sub('_\\d+$', '', clean_operator_name) if operator_name.startswith('<') and operator_name.endswith('>'): return f'<{clean_operator_name}_{count}>' else: return f'{clean_operator_name}_{...
def format_duplicated_operator(operator_name: str, count: int) ->str: """Format the operator name with the count""" clean_operator_name = re.sub('[<>]', '', operator_name) clean_operator_name = re.sub('_\\d+$', '', clean_operator_name) if operator_name.startswith('<') and operator_name.endswith('>'): ...
Format the operator name with the count
_convert_dict_to_message
role = _dict['role'] content = _dict['content'] if role == 'user': return HumanMessage(content=content) elif role == 'assistant': return AIMessage(content=content) elif role == 'system': return SystemMessage(content=content) else: return ChatMessage(content=content, role=role)
@staticmethod def _convert_dict_to_message(_dict: Mapping[str, Any]) ->BaseMessage: role = _dict['role'] content = _dict['content'] if role == 'user': return HumanMessage(content=content) elif role == 'assistant': return AIMessage(content=content) elif role == 'system': retur...
null
lazy_load
return self._integration._load_data(stream_name=self._stream_name, state= self._state)
def lazy_load(self) ->Iterator[Document]: return self._integration._load_data(stream_name=self._stream_name, state=self._state)
null
input_keys
"""Return the input keys of the chain. Returns: List[str]: The input keys. """ return ['prediction', 'prediction_b']
@property def input_keys(self) ->List[str]: """Return the input keys of the chain. Returns: List[str]: The input keys. """ return ['prediction', 'prediction_b']
Return the input keys of the chain. Returns: List[str]: The input keys.
on_retriever_error
self.on_retriever_error_common()
def on_retriever_error(self, *args: Any, **kwargs: Any) ->Any: self.on_retriever_error_common()
null
max_marginal_relevance_search
"""Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: query (str): Text to look up documents similar to. k (int): Number of Documents to return. Defaul...
def max_marginal_relevance_search(self, query: str, k: int=4, fetch_k: int= 20, lambda_mult: float=0.5, filter: Optional[Dict[str, str]]=None, ** kwargs: Any) ->List[Document]: """Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query...
Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: query (str): Text to look up documents similar to. k (int): Number of Documents to return. Defaults to 4. fetch_k (int): Number of Docum...
on_agent_finish
"""Do nothing.""" pass
def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) ->None: """Do nothing.""" pass
Do nothing.
get_schema
return db.get_table_info()
def get_schema(_): return db.get_table_info()
null
_drop_collection
""" Drop the collection from storage. This is meant as an internal-usage method, no members are set other than actual deletion on the backend. """ _ = self.astra_db.delete_collection(collection_name=self.collection_name) return None
def _drop_collection(self) ->None: """ Drop the collection from storage. This is meant as an internal-usage method, no members are set other than actual deletion on the backend. """ _ = self.astra_db.delete_collection(collection_name=self.collection_name) return None
Drop the collection from storage. This is meant as an internal-usage method, no members are set other than actual deletion on the backend.
_construct_scratchpad
"""Construct the scratchpad that lets the agent continue its thought process.""" thoughts: List[BaseMessage] = [] for action, observation in intermediate_steps: thoughts.append(AIMessage(content=action.log)) human_message = HumanMessage(content=self.template_tool_response.format (observation=observation...
def _construct_scratchpad(self, intermediate_steps: List[Tuple[AgentAction, str]]) ->List[BaseMessage]: """Construct the scratchpad that lets the agent continue its thought process.""" thoughts: List[BaseMessage] = [] for action, observation in intermediate_steps: thoughts.append(AIMessage(conte...
Construct the scratchpad that lets the agent continue its thought process.
on_llm_error
if parent_run_id is None: self.increment()
def on_llm_error(self, error: BaseException, *, run_id: UUID, parent_run_id: Optional[UUID]=None, **kwargs: Any) ->Any: if parent_run_id is None: self.increment()
null
details_of_games
games = self.steam.apps.search_games(name) info_partOne_dict = self.get_id_link_price(games) info_partOne = self.parse_to_str(info_partOne_dict) id = str(info_partOne_dict.get('id')) info_dict = self.steam.apps.get_app_details(id) data = info_dict.get(id).get('data') detailed_description = data.get('detailed_descriptio...
def details_of_games(self, name: str) ->str: games = self.steam.apps.search_games(name) info_partOne_dict = self.get_id_link_price(games) info_partOne = self.parse_to_str(info_partOne_dict) id = str(info_partOne_dict.get('id')) info_dict = self.steam.apps.get_app_details(id) data = info_dict.get...
null
similarity_search_with_score
"""Run similarity search with score. Args: query: search query text. k: Number of Documents to return. Defaults to 4. filter: Filter on metadata properties, e.g. { "str_property": "foo", ...
def similarity_search_with_score(self, query: str, k: int=DEFAULT_TOP_K, filter: Optional[Dict[str, Any]]=None, brute_force: bool=False, fraction_lists_to_search: Optional[float]=None, **kwargs: Any) ->List[Tuple [Document, float]]: """Run similarity search with score. Args: query: ...
Run similarity search with score. Args: query: search query text. k: Number of Documents to return. Defaults to 4. filter: Filter on metadata properties, e.g. { "str_property": "foo", "int_property": 123 } brute_for...
yield_blobs
"""Yield blobs that match the requested pattern.""" iterator = _make_iterator(length_func=self.count_matching_files, show_progress=self.show_progress) for path in iterator(self._yield_paths()): yield Blob.from_path(path)
def yield_blobs(self) ->Iterable[Blob]: """Yield blobs that match the requested pattern.""" iterator = _make_iterator(length_func=self.count_matching_files, show_progress=self.show_progress) for path in iterator(self._yield_paths()): yield Blob.from_path(path)
Yield blobs that match the requested pattern.
tool_run_logging_kwargs
return {'llm_prefix': self.llm_prefix, 'observation_prefix': self. observation_prefix}
def tool_run_logging_kwargs(self) ->Dict: return {'llm_prefix': self.llm_prefix, 'observation_prefix': self. observation_prefix}
null
test_jinachat_api_key_masked_when_passed_via_constructor
"""Test initialization with an API key provided via the initializer""" llm = JinaChat(jinachat_api_key='secret-api-key') print(llm.jinachat_api_key, end='') captured = capsys.readouterr() assert captured.out == '**********'
def test_jinachat_api_key_masked_when_passed_via_constructor(capsys: CaptureFixture) ->None: """Test initialization with an API key provided via the initializer""" llm = JinaChat(jinachat_api_key='secret-api-key') print(llm.jinachat_api_key, end='') captured = capsys.readouterr() assert captured...
Test initialization with an API key provided via the initializer
load
"""Load documents.""" try: from google.cloud import storage except ImportError: raise ImportError( 'Could not import google-cloud-storage python package. Please install it with `pip install google-cloud-storage`.' ) storage_client = storage.Client(self.project_name, client_info= get_client_i...
def load(self) ->List[Document]: """Load documents.""" try: from google.cloud import storage except ImportError: raise ImportError( 'Could not import google-cloud-storage python package. Please install it with `pip install google-cloud-storage`.' ) storage_client ...
Load documents.
standardize_model_name
""" Standardize the model name to a format that can be used in the OpenAI API. Args: model_name: Model name to standardize. is_completion: Whether the model is used for completion or not. Defaults to False. Returns: Standardized model name. """ model_name = model_n...
def standardize_model_name(model_name: str, is_completion: bool=False) ->str: """ Standardize the model name to a format that can be used in the OpenAI API. Args: model_name: Model name to standardize. is_completion: Whether the model is used for completion or not. Defaults to F...
Standardize the model name to a format that can be used in the OpenAI API. Args: model_name: Model name to standardize. is_completion: Whether the model is used for completion or not. Defaults to False. Returns: Standardized model name.
_construct_scratchpad
agent_scratchpad = super()._construct_scratchpad(intermediate_steps) if not isinstance(agent_scratchpad, str): raise ValueError('agent_scratchpad should be of type string.') if agent_scratchpad: return f"""This was your previous work (but I haven't seen any of it! I only see what you return as final answer): {a...
def _construct_scratchpad(self, intermediate_steps: List[Tuple[AgentAction, str]]) ->str: agent_scratchpad = super()._construct_scratchpad(intermediate_steps) if not isinstance(agent_scratchpad, str): raise ValueError('agent_scratchpad should be of type string.') if agent_scratchpad: ret...
null
batch
if not inputs: return [] keys = [input['key'] for input in inputs] actual_inputs = [input['input'] for input in inputs] if any(key not in self.runnables for key in keys): raise ValueError('One or more keys do not have a corresponding runnable') def invoke(runnable: Runnable, input: Input, config: RunnableConfig...
def batch(self, inputs: List[RouterInput], config: Optional[Union[ RunnableConfig, List[RunnableConfig]]]=None, *, return_exceptions: bool =False, **kwargs: Optional[Any]) ->List[Output]: if not inputs: return [] keys = [input['key'] for input in inputs] actual_inputs = [input['input'] for i...
null
_run
"""Use the Sleep tool.""" sleep(sleep_time) return f'Agent slept for {sleep_time} seconds.'
def _run(self, sleep_time: int, run_manager: Optional[ CallbackManagerForToolRun]=None) ->str: """Use the Sleep tool.""" sleep(sleep_time) return f'Agent slept for {sleep_time} seconds.'
Use the Sleep tool.
test_all_imports
assert set(__all__) == set(EXPECTED_ALL)
def test_all_imports() -> None:
    """Verify the module exports exactly the expected set of public names."""
    exported = set(__all__)
    expected = set(EXPECTED_ALL)
    assert expected == exported
null
_call_before_predict
context, actions = base.get_based_on_and_to_select_from(inputs=inputs) if not actions: raise ValueError( "No variables using 'ToSelectFrom' found in the inputs. Please include at least one variable containing a list to select from." ) if len(list(actions.values())) > 1: raise ValueError( ...
def _call_before_predict(self, inputs: Dict[str, Any]) ->PickBestEvent: context, actions = base.get_based_on_and_to_select_from(inputs=inputs) if not actions: raise ValueError( "No variables using 'ToSelectFrom' found in the inputs. Please include at least one variable containing a list to s...
null
json
return {'uuid': 'fake_uuid'}
def json(self) -> Any:
    """Return a canned JSON payload for the mocked response object."""
    payload = {'uuid': 'fake_uuid'}
    return payload
null
test_loading_from_JSON
"""Test loading from json file.""" prompt = load_prompt(EXAMPLE_DIR / 'simple_prompt.json') expected_prompt = PromptTemplate(input_variables=['adjective', 'content'], template='Tell me a {adjective} joke about {content}.') assert prompt == expected_prompt
def test_loading_from_JSON() -> None:
    """Verify a prompt loaded from a JSON file matches the expected template."""
    expected_prompt = PromptTemplate(
        input_variables=['adjective', 'content'],
        template='Tell me a {adjective} joke about {content}.',
    )
    loaded = load_prompt(EXAMPLE_DIR / 'simple_prompt.json')
    assert loaded == expected_prompt
Test loading from json file.
_stream
res = self._chat(messages, **kwargs) default_chunk_class = AIMessageChunk for chunk in res.iter_lines(): response = json.loads(chunk) if 'error' in response: raise ValueError(f'Error from Hunyuan api response: {response}') for choice in response['choices']: chunk = _convert_delta_to_message_...
def _stream(self, messages: List[BaseMessage], stop: Optional[List[str]]= None, run_manager: Optional[CallbackManagerForLLMRun]=None, **kwargs: Any ) ->Iterator[ChatGenerationChunk]: res = self._chat(messages, **kwargs) default_chunk_class = AIMessageChunk for chunk in res.iter_lines(): resp...
null
_out_file_path
"""Return the path to the file containing the documentation.""" return HERE / f"{package_name.replace('-', '_')}_api_reference.rst"
def _out_file_path(package_name: str = 'langchain') -> Path:
    """Build the ``.rst`` output path for *package_name*'s API reference."""
    # Package names use dashes; module/file names use underscores.
    normalized = package_name.replace('-', '_')
    return HERE / (normalized + '_api_reference.rst')
Return the path to the file containing the documentation.
test_load_no_result
docs = api_client.load( 'NORESULTCALL_NORESULTCALL_NORESULTCALL_NORESULTCALL_NORESULTCALL_NORESULTCALL' ) assert not docs
def test_load_no_result(api_client: WikipediaAPIWrapper) ->None: docs = api_client.load( 'NORESULTCALL_NORESULTCALL_NORESULTCALL_NORESULTCALL_NORESULTCALL_NORESULTCALL' ) assert not docs
null
on_retriever_error_common
self.errors += 1 self.retriever_errors += 1
def on_retriever_error_common(self) -> None:
    """Record a retriever failure in the shared counters."""
    # Bump both the retriever-specific and the global error counts.
    self.retriever_errors += 1
    self.errors += 1
null
test_extract_sub_links
html = ( '<a href="https://foobar.com">one</a><a href="http://baz.net">two</a><a href="//foobar.com/hello">three</a><a href="/how/are/you/doing">four</a>' ) expected = sorted(['https://foobar.com', 'https://foobar.com/hello', 'https://foobar.com/how/are/you/doing']) actual = sorted(extract_sub_links(html, '...
def test_extract_sub_links() ->None: html = ( '<a href="https://foobar.com">one</a><a href="http://baz.net">two</a><a href="//foobar.com/hello">three</a><a href="/how/are/you/doing">four</a>' ) expected = sorted(['https://foobar.com', 'https://foobar.com/hello', 'https://foobar.com/how/a...
null
get_lc_namespace
"""Get the namespace of the langchain object.""" return ['langchain', 'schema', 'runnable']
@classmethod
def get_lc_namespace(cls) -> List[str]:
    """Return the namespace path used when serializing this object."""
    namespace = ['langchain', 'schema', 'runnable']
    return namespace
Get the namespace of the langchain object.
test_multiple_history
"""Tests multiple history works.""" chat = ChatTongyi() response = chat(messages=[HumanMessage(content='Hello.'), AIMessage(content ='Hello!'), HumanMessage(content='How are you doing?')]) assert isinstance(response, BaseMessage) assert isinstance(response.content, str)
def test_multiple_history() ->None: """Tests multiple history works.""" chat = ChatTongyi() response = chat(messages=[HumanMessage(content='Hello.'), AIMessage( content='Hello!'), HumanMessage(content='How are you doing?')]) assert isinstance(response, BaseMessage) assert isinstance(response...
Tests multiple history works.
transform
yield from self.bound.transform(input, self._merge_configs(config), **{** self.kwargs, **kwargs})
def transform(self, input: Iterator[Input], config: Optional[RunnableConfig
    ] = None, **kwargs: Any) -> Iterator[Output]:
    """Delegate streaming transformation to the bound runnable."""
    # Bound kwargs act as defaults; call-site kwargs win on conflict.
    merged_kwargs = {**self.kwargs, **kwargs}
    merged_config = self._merge_configs(config)
    yield from self.bound.transform(input, merged_config, **merged_kwargs)
null
_alert_unsupported_spec
"""Alert if the spec is not supported.""" warning_message = (' This may result in degraded performance.' + ' Convert your OpenAPI spec to 3.1.* spec' + ' for better support.') swagger_version = obj.get('swagger') openapi_version = obj.get('openapi') if isinstance(openapi_version, str): if openapi_version != '3....
@staticmethod def _alert_unsupported_spec(obj: dict) ->None: """Alert if the spec is not supported.""" warning_message = (' This may result in degraded performance.' + ' Convert your OpenAPI spec to 3.1.* spec' + ' for better support.') swagger_version = obj.get('swagger') openapi_version = obj....
Alert if the spec is not supported.
ignore_chain
"""Whether to ignore chain callbacks.""" return False
@property
def ignore_chain(self) -> bool:
    """Whether to ignore chain callbacks."""
    # This handler always wants chain events, so never ignore them.
    should_ignore = False
    return should_ignore
Whether to ignore chain callbacks.
test_qdrant_add_texts_stores_embeddings_as_named_vectors
"""Test end to end Qdrant.add_texts stores named vectors if name is provided.""" from qdrant_client import QdrantClient from qdrant_client.http import models as rest collection_name = uuid.uuid4().hex client = QdrantClient(':memory:') client.recreate_collection(collection_name, vectors_config={vector_name: rest.Vec...
@pytest.mark.parametrize('vector_name', ['custom-vector']) def test_qdrant_add_texts_stores_embeddings_as_named_vectors(vector_name: str ) ->None: """Test end to end Qdrant.add_texts stores named vectors if name is provided.""" from qdrant_client import QdrantClient from qdrant_client.http import models...
Test end to end Qdrant.add_texts stores named vectors if name is provided.
_stream
params = self._prepare_params(stop=stop, stream=True, **kwargs) if self._is_gemini_model: history_gemini = _parse_chat_history_gemini(messages, project=self.project) message = history_gemini.pop() chat = self.client.start_chat(history=history_gemini) responses = chat.send_message(message, stream=True, g...
def _stream(self, messages: List[BaseMessage], stop: Optional[List[str]]= None, run_manager: Optional[CallbackManagerForLLMRun]=None, **kwargs: Any ) ->Iterator[ChatGenerationChunk]: params = self._prepare_params(stop=stop, stream=True, **kwargs) if self._is_gemini_model: history_gemini = _parse...
null
load
iter = self.lazy_load() if self.show_progress_bar: try: from tqdm import tqdm except ImportError as e: raise ImportError( "Package tqdm must be installed if show_progress_bar=True. Please install with 'pip install tqdm' or set show_progress_bar=False." ) from e iter =...
def load(self) ->List[Document]: iter = self.lazy_load() if self.show_progress_bar: try: from tqdm import tqdm except ImportError as e: raise ImportError( "Package tqdm must be installed if show_progress_bar=True. Please install with 'pip install tqdm' or ...
null
_parse_front_matter
"""Parse front matter metadata from the content and return it as a dict.""" if not self.collect_metadata: return {} match = self.FRONT_MATTER_REGEX.search(content) if not match: return {} placeholders: Dict[str, str] = {} replace_template_var = functools.partial(self._replace_template_var, placeholders) fro...
def _parse_front_matter(self, content: str) ->dict: """Parse front matter metadata from the content and return it as a dict.""" if not self.collect_metadata: return {} match = self.FRONT_MATTER_REGEX.search(content) if not match: return {} placeholders: Dict[str, str] = {} replac...
Parse front matter metadata from the content and return it as a dict.
_import_nasa
from langchain_community.utilities.nasa import NasaAPIWrapper return NasaAPIWrapper
def _import_nasa() -> Any:
    """Lazily import and return the NASA API wrapper class."""
    # Deferred import keeps the optional dependency out of module import time.
    from langchain_community.utilities import nasa
    return nasa.NasaAPIWrapper
null
test_nvai_play_embedding_documents
"""Test NVIDIA embeddings for documents.""" documents = ['foo bar'] embedding = NVIDIAEmbeddings(model='nvolveqa_40k') output = embedding.embed_documents(documents) assert len(output) == 1 assert len(output[0]) == 1024
def test_nvai_play_embedding_documents() -> None:
    """A single document should embed into exactly one 1024-dim vector."""
    embedder = NVIDIAEmbeddings(model='nvolveqa_40k')
    vectors = embedder.embed_documents(['foo bar'])
    assert len(vectors) == 1
    assert len(vectors[0]) == 1024
Test NVIDIA embeddings for documents.
load
"""Load tweets.""" tweepy = _dependable_tweepy_import() api = tweepy.API(self.auth, parser=tweepy.parsers.JSONParser()) results: List[Document] = [] for username in self.twitter_users: tweets = api.user_timeline(screen_name=username, count=self.number_tweets) user = api.get_user(screen_name=username) docs =...
def load(self) ->List[Document]: """Load tweets.""" tweepy = _dependable_tweepy_import() api = tweepy.API(self.auth, parser=tweepy.parsers.JSONParser()) results: List[Document] = [] for username in self.twitter_users: tweets = api.user_timeline(screen_name=username, count=self. n...
Load tweets.
test_list_keys
"""Test listing keys based on the provided date range.""" assert manager.list_keys() == [] with manager._make_session() as session: session.add(UpsertionRecord(key='key1', updated_at=datetime(2021, 1, 1) .timestamp(), namespace='kittens')) session.add(UpsertionRecord(key='key2', updated_at=datetime(2022...
def test_list_keys(manager: SQLRecordManager) ->None: """Test listing keys based on the provided date range.""" assert manager.list_keys() == [] with manager._make_session() as session: session.add(UpsertionRecord(key='key1', updated_at=datetime(2021, 1, 1).timestamp(), namespace='kitten...
Test listing keys based on the provided date range.
max_marginal_relevance_search_by_vector
"""Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4...
def max_marginal_relevance_search_by_vector(self, embedding: List[float], k: int=4, fetch_k: int=20, lambda_mult: float=0.5, metadata: Optional[Dict [str, Any]]=None, **kwargs: Any) ->List[Document]: """Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes ...
Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. fetch_k: Number of Documents to fetch...
test_pickbest_textembedder_more_namespaces_w_full_label_w_full_embed_and_keep
feature_embedder = pick_best_chain.PickBestFeatureEmbedder(auto_embed=False, model=MockEncoder()) str1 = '0' str2 = '1' str3 = '2' encoded_str1 = rl_chain.stringify_embedding(list(encoded_keyword + str1)) encoded_str2 = rl_chain.stringify_embedding(list(encoded_keyword + str2)) encoded_str3 = rl_chain.stringify_emb...
@pytest.mark.requires('vowpal_wabbit_next') def test_pickbest_textembedder_more_namespaces_w_full_label_w_full_embed_and_keep( ) ->None: feature_embedder = pick_best_chain.PickBestFeatureEmbedder(auto_embed= False, model=MockEncoder()) str1 = '0' str2 = '1' str3 = '2' encoded_str1 = rl_c...
null
_get_relevant_documents
"""Get documents relevant for a query.""" try: from llama_index.indices.base import BaseGPTIndex from llama_index.response.schema import Response except ImportError: raise ImportError( 'You need to install `pip install llama-index` to use this retriever.') index = cast(BaseGPTIndex, self.index) resp...
def _get_relevant_documents(self, query: str, *, run_manager: CallbackManagerForRetrieverRun) ->List[Document]: """Get documents relevant for a query.""" try: from llama_index.indices.base import BaseGPTIndex from llama_index.response.schema import Response except ImportError: ra...
Get documents relevant for a query.
delete
self.redis_client.delete(f'{self.full_key_prefix}:{key}')
def delete(self, key: str) -> None:
    """Remove the entry stored under ``key`` from Redis."""
    # Keys are namespaced with the store's full key prefix.
    full_key = f'{self.full_key_prefix}:{key}'
    self.redis_client.delete(full_key)
null
on_tool_error
"""Run when tool errors.""" self.step += 1 self.errors += 1
def on_tool_error(self, error: BaseException, **kwargs: Any) -> None:
    """Record that a tool invocation failed."""
    # A failed tool call still advances the step counter.
    self.errors += 1
    self.step += 1
Run when tool errors.
_get_metaphor_search
return MetaphorSearchResults(api_wrapper=MetaphorSearchAPIWrapper(**kwargs))
def _get_metaphor_search(**kwargs: Any) -> BaseTool:
    """Construct the Metaphor search results tool from keyword configuration."""
    wrapper = MetaphorSearchAPIWrapper(**kwargs)
    return MetaphorSearchResults(api_wrapper=wrapper)
null
_assert_with_duplicate_parser
"""PDFPlumber tests to verify that duplicate characters appear or not Args: parser (BaseBlobParser): The parser to test. splits_by_page (bool): Whether the parser splits by page or not by default. dedupe: Avoiding the error of duplicate characters if `dedupe=True`. """ blob = Blob.from_p...
def _assert_with_duplicate_parser(parser: BaseBlobParser, dedupe: bool=False ) ->None: """PDFPlumber tests to verify that duplicate characters appear or not Args: parser (BaseBlobParser): The parser to test. splits_by_page (bool): Whether the parser splits by page or not by default. ...
PDFPlumber tests to verify that duplicate characters appear or not Args: parser (BaseBlobParser): The parser to test. splits_by_page (bool): Whether the parser splits by page or not by default. dedupe: Avoiding the error of duplicate characters if `dedupe=True`.
test_appx_search_with_lucene_filter
"""Test Approximate Search with Lucene Filter.""" lucene_filter_val = {'bool': {'must': [{'term': {'text': 'bar'}}]}} docsearch = OpenSearchVectorSearch.from_texts(texts, FakeEmbeddings(), opensearch_url=DEFAULT_OPENSEARCH_URL, engine='lucene') output = docsearch.similarity_search('foo', k=3, lucene_filter= luc...
def test_appx_search_with_lucene_filter() ->None: """Test Approximate Search with Lucene Filter.""" lucene_filter_val = {'bool': {'must': [{'term': {'text': 'bar'}}]}} docsearch = OpenSearchVectorSearch.from_texts(texts, FakeEmbeddings(), opensearch_url=DEFAULT_OPENSEARCH_URL, engine='lucene') o...
Test Approximate Search with Lucene Filter.
get_lc_namespace
"""Get the namespace of the langchain object.""" return ['langchain', 'prompts', 'chat']
@classmethod
def get_lc_namespace(cls) -> List[str]:
    """Return the namespace path used when serializing this object."""
    parts = ['langchain', 'prompts', 'chat']
    return parts
Get the namespace of the langchain object.
test_timescalevector_delete
"""Test deleting functionality.""" texts = ['bar', 'baz'] docs = [Document(page_content=t, metadata={'a': 'b'}) for t in texts] docsearch = TimescaleVector.from_documents(documents=docs, collection_name= 'test_collection', embedding=FakeEmbeddingsWithAdaDimension(), service_url=SERVICE_URL, pre_delete_collectio...
def test_timescalevector_delete() ->None: """Test deleting functionality.""" texts = ['bar', 'baz'] docs = [Document(page_content=t, metadata={'a': 'b'}) for t in texts] docsearch = TimescaleVector.from_documents(documents=docs, collection_name='test_collection', embedding= FakeEmbedding...
Test deleting functionality.