method_name
stringlengths
1
78
method_body
stringlengths
3
9.66k
full_code
stringlengths
31
10.7k
docstring
stringlengths
4
4.74k
@responses.activate
def test_cloudflare_workers_ai_embedding_query() -> None:
    """Test Cloudflare Workers AI embeddings."""
    # Mock the Workers AI REST endpoint for the bge-base-en-v1.5 model so no
    # real network call is made.
    responses.add(responses.POST,
        'https://api.cloudflare.com/client/v4/accounts/123/ai/run/@cf/baai/bge-base-en-v1.5'
        , json={'result': {'shape': [1, 768], 'data': [[0.0] * 768]},
        'success': 'true', 'errors': [], 'messages': []})
    document = 'foo bar'
    embeddings = CloudflareWorkersAIEmbeddings(account_id='123', api_token='abc')
    output = embeddings.embed_query(document)
    # The mocked model returns 768-dimensional vectors.
    assert len(output) == 768
@pytest.mark.requires('faiss')
def test_faiss_with_metadatas_and_filter() -> None:
    """FAISS keeps per-text metadata and honours a metadata filter in search."""
    texts = ['foo', 'bar', 'baz']
    metadatas = [{'page': i} for i in range(len(texts))]
    docsearch = FAISS.from_texts(texts, FakeEmbeddings(), metadatas=metadatas)
    # Each generated docstore id should map to its Document with metadata.
    expected_docstore = InMemoryDocstore({
        docsearch.index_to_docstore_id[0]: Document(page_content='foo', metadata={'page': 0}),
        docsearch.index_to_docstore_id[1]: Document(page_content='bar', metadata={'page': 1}),
        docsearch.index_to_docstore_id[2]: Document(page_content='baz', metadata={'page': 2})})
    assert docsearch.docstore.__dict__ == expected_docstore.__dict__
    # The filter restricts results to page == 1 even though 'foo' is a closer
    # match to the query text.
    output = docsearch.similarity_search('foo', k=1, filter={'page': 1})
    assert output == [Document(page_content='bar', metadata={'page': 1})]
def right_input_mapper(d: dict) -> dict:
    """Map an incoming payload onto the ``question`` key the chain expects."""
    return dict(question=d['some_input'])
def _run(self, run_manager: Optional[CallbackManagerForToolRun] = None) -> str:
    """Use the tool."""
    if self.sync_browser is None:
        raise ValueError(f'Synchronous browser not provided to {self.name}')
    page = get_current_page(self.sync_browser)
    response = page.go_back()
    # No response means there was no previous page to go back to.
    if not response:
        return 'Unable to navigate back; no previous page in the history'
    return (
        f"Navigated back to the previous page with URL '{response.url}'."
        f" Status code {response.status}"
    )
def get_num_tokens(self, text: str) -> int:
    """Return number of tokens in text."""
    # Whitespace-split word count stands in for a real tokenizer here.
    tokens = text.split()
    return len(tokens)
def _get_stackexchange(**kwargs: Any) -> BaseTool:
    # Factory: build a StackExchange tool, forwarding kwargs to the wrapper.
    return StackExchangeTool(api_wrapper=StackExchangeAPIWrapper(**kwargs))
def _convert_text_to_prompt(self, prompt: Any, text: str) -> Any:
    """Re-wrap ``text`` in the same container type as the original ``prompt``.

    Supports plain strings, StringPromptValue, and ChatPromptValue (where the
    message at ``self.chat_message_index`` is replaced, preserving its
    example flag and additional kwargs).

    Raises:
        ValueError: if ``prompt`` is none of the supported types.
    """
    if isinstance(prompt, StringPromptValue):
        return StringPromptValue(text=text)
    elif isinstance(prompt, str):
        return text
    elif isinstance(prompt, ChatPromptValue):
        messages = list(prompt.messages)
        message = messages[self.chat_message_index]
        if isinstance(message, HumanMessage):
            messages[self.chat_message_index] = HumanMessage(
                content=text,
                example=message.example,
                additional_kwargs=message.additional_kwargs,
            )
        if isinstance(message, AIMessage):
            messages[self.chat_message_index] = AIMessage(
                content=text,
                example=message.example,
                additional_kwargs=message.additional_kwargs,
            )
        return ChatPromptValue(messages=messages)
    else:
        # BUG FIX: the original formatted ``type(input)`` — the *builtin*
        # ``input`` function — into the message, which always printed
        # "<class 'builtin_function_or_method'>". Report the actual argument.
        raise ValueError(
            f'Invalid input type {type(prompt)}. Must be a PromptValue, str, or list of BaseMessages.'
        )
def _get_golden_query(**kwargs: Any) -> BaseTool:
    # Factory: build a Golden Query tool, forwarding kwargs to the wrapper.
    return GoldenQueryRun(api_wrapper=GoldenQueryAPIWrapper(**kwargs))
@root_validator()
def raise_deprecation(cls, values: Dict) -> Dict:
    """Raise deprecation warning if callback_manager is used."""
    if values.get('callback_manager') is not None:
        warnings.warn(
            'callback_manager is deprecated. Please use callbacks instead.',
            DeprecationWarning)
        # Migrate the legacy value onto the supported ``callbacks`` field.
        values['callbacks'] = values.pop('callback_manager', None)
    return values
def _run(self, query: str, run_manager: Optional[CallbackManagerForToolRun] = None) -> str:
    """Execute the query, return the results or an error message."""
    # run_no_throw converts DB errors into a string instead of raising.
    return self.db.run_no_throw(query)
@property
def embeddings(self) -> Optional[Embeddings]:
    # Expose the stored embedding function through the standard
    # vector-store ``embeddings`` accessor.
    return self._embedding_function
@pytest.mark.requires('presidio_analyzer', 'presidio_anonymizer', 'faker')
def test_anonymize_multiple() -> None:
    """Test anonymizing multiple items in a sentence"""
    from langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer
    text = (
        "John Smith's phone number is 313-666-7440 and email is johnsmith@gmail.com"
    )
    anonymizer = PresidioReversibleAnonymizer()
    anonymized_text = anonymizer.anonymize(text)
    # Every PII fragment (name, phone, email) must be removed from the output.
    for phrase in ['John Smith', '313-666-7440', 'johnsmith@gmail.com']:
        assert phrase not in anonymized_text
_check_response
"""Check the response from the DataForSEO SERP API for errors.""" if response.get('status_code') != 20000: raise ValueError( f"Got error from DataForSEO SERP API: {response.get('status_message')}" ) return response
def _check_response(self, response: dict) ->dict: """Check the response from the DataForSEO SERP API for errors.""" if response.get('status_code') != 20000: raise ValueError( f"Got error from DataForSEO SERP API: {response.get('status_message')}" ) return response
Check the response from the DataForSEO SERP API for errors.
_strip_erroneous_leading_spaces
"""Strip erroneous leading spaces from text. The PaLM API will sometimes erroneously return a single leading space in all lines > 1. This function strips that space. """ has_leading_space = all(not line or line[0] == ' ' for line in text.split( '\n')[1:]) if has_leading_space: return text.replace('\n ', '\n') else: return text
def _strip_erroneous_leading_spaces(text: str) ->str: """Strip erroneous leading spaces from text. The PaLM API will sometimes erroneously return a single leading space in all lines > 1. This function strips that space. """ has_leading_space = all(not line or line[0] == ' ' for line in text. split('\n')[1:]) if has_leading_space: return text.replace('\n ', '\n') else: return text
Strip erroneous leading spaces from text. The PaLM API will sometimes erroneously return a single leading space in all lines > 1. This function strips that space.
def similarity_search_by_vector(self, embedding: List[float], k: int = 4,
        filter: Optional[dict] = None, **kwargs: Any) -> List[Document]:
    """Return docs most similar to embedding vector.

    Args:
        embedding: Embedding to look up documents similar to.
        k: Number of Documents to return. Defaults to 4.
        filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.

    Returns:
        List of Documents most similar to the query vector.
    """
    # Delegate to the scored variant and drop the scores.
    docs_and_scores = self.similarity_search_with_score_by_vector(
        embedding=embedding, k=k, filter=filter)
    return [doc for doc, _ in docs_and_scores]
def test_banana_call() -> None:
    """Test valid call to BananaDev."""
    llm = Banana()
    # Smoke test: a completion call should return a string.
    output = llm('Say foo:')
    assert isinstance(output, str)
def test_valid_final_answer_parse() -> None:
    """A 'Final Answer:' completion parses into an AgentFinish whose output
    value has trailing whitespace stripped."""
    llm_output = 'Final Answer: The best pizza to eat is margaritta '
    # BUG FIX: the MRKL output parser API is ``parse``; ``parse_folder`` is
    # not a parser method and would raise AttributeError at runtime.
    agent_finish: AgentFinish = mrkl_output_parser.parse(llm_output)
    assert agent_finish.return_values.get('output'
        ) == 'The best pizza to eat is margaritta'
def _huggingface_tokenizer_length(text: str) -> int:
    # Token count via the enclosing scope's ``tokenizer`` — presumably a
    # HuggingFace tokenizer captured by closure; verify against the caller.
    return len(tokenizer.encode(text))
def test_search_filter() -> None:
    """Test end to end construction and search with metadata filtering."""
    texts = ['hello bagel', 'hello langchain']
    metadatas = [{'first_letter': text[0]} for text in texts]
    txt_search = Bagel.from_texts(cluster_name='testing', texts=texts,
        metadatas=metadatas)
    # Both searches use the same where-filter; ranking is driven by the query.
    output = txt_search.similarity_search('bagel', k=1, where={
        'first_letter': 'h'})
    assert output == [Document(page_content='hello bagel', metadata={
        'first_letter': 'h'})]
    output = txt_search.similarity_search('langchain', k=1, where={
        'first_letter': 'h'})
    assert output == [Document(page_content='hello langchain', metadata={
        'first_letter': 'h'})]
    # Clean up the remote cluster created by this test.
    txt_search.delete_cluster()
def _get_user_id(metadata: Any) -> Any:
    """Resolve a user id from the context var, falling back to metadata.

    Checks 'user_id' first, then the camel-case 'userId' variant.
    """
    ctx_user = user_ctx.get()
    if ctx_user is not None:
        return ctx_user
    meta = metadata or {}
    uid = meta.get('user_id')
    return uid if uid is not None else meta.get('userId')
@staticmethod
def _format_results(docs: Iterable[Document], query: str) -> str:
    """Render 'title\\ndescription' entries for docs whose title or
    description metadata contains the query, joined by blank lines."""
    rendered = []
    for doc in docs:
        meta = doc.metadata
        if query in meta['description'] or query in meta['title']:
            rendered.append('\n'.join([meta['title'], meta['description']]))
    return '\n\n'.join(rendered)
def _with_tqdm(iterable: Iterable[T]) -> Iterator[T]:
    """Wrap an iterable in a tqdm progress bar."""
    # ``length_func`` comes from the enclosing scope and supplies the total
    # for the progress bar — presumably the expected item count; verify.
    return tqdm(iterable, total=length_func())
_import_python_tool_PythonREPLTool
raise ImportError( "This tool has been moved to langchain experiment. This tool has access to a python REPL. For best practices make sure to sandbox this tool. Read https://github.com/langchain-ai/langchain/blob/master/SECURITY.md To keep using this code as is, install langchain experimental and update relevant imports replacing 'langchain' with 'langchain_experimental'" )
def _import_python_tool_PythonREPLTool() ->Any: raise ImportError( "This tool has been moved to langchain experiment. This tool has access to a python REPL. For best practices make sure to sandbox this tool. Read https://github.com/langchain-ai/langchain/blob/master/SECURITY.md To keep using this code as is, install langchain experimental and update relevant imports replacing 'langchain' with 'langchain_experimental'" )
null
def _create_template_from_message_type(message_type: str, template: str
        ) -> BaseMessagePromptTemplate:
    """Create a message prompt template from a message type and template string.

    Args:
        message_type: str the type of the message template (e.g., "human",
            "ai", etc.)
        template: str the template string.

    Returns:
        a message prompt template of the appropriate type.

    Raises:
        ValueError: if ``message_type`` is not one of the recognised types.
    """
    # 'human'/'user' and 'ai'/'assistant' are accepted as synonyms.
    if message_type in ('human', 'user'):
        message: BaseMessagePromptTemplate = (HumanMessagePromptTemplate.
            from_template(template))
    elif message_type in ('ai', 'assistant'):
        message = AIMessagePromptTemplate.from_template(template)
    elif message_type == 'system':
        message = SystemMessagePromptTemplate.from_template(template)
    else:
        raise ValueError(
            f"Unexpected message type: {message_type}. Use one of 'human', 'user', 'ai', 'assistant', or 'system'."
            )
    return message
def test_int_metadata() -> None:
    """Verify int metadata is loaded correctly"""
    # ``docs`` is a module-level fixture of loaded documents; pick the one
    # sourced from the frontmatter test file.
    doc = next(doc for doc in docs if doc.metadata['source'] ==
        'tags_and_frontmatter.md')
    assert doc.metadata['anInt'] == 15
def test_finish() -> None:
    """Test standard parsing of agent finish."""
    parser = ReActJsonSingleInputOutputParser()
    _input = (
        'Thought: agent thought here\nFinal Answer: The temperature is 100')
    output = parser.invoke(_input)
    # The full raw input is preserved in ``log``; only the text after
    # 'Final Answer:' becomes the output value.
    expected_output = AgentFinish(return_values={'output':
        'The temperature is 100'}, log=_input)
    assert output == expected_output
def _import_graphql_tool() -> Any:
    # Lazy import to avoid loading langchain_community at module import time.
    from langchain_community.tools.graphql.tool import BaseGraphQLTool
    return BaseGraphQLTool
def get_openai_token_cost_for_model(model_name: str, num_tokens: int,
        is_completion: bool = False) -> float:
    """
    Get the cost in USD for a given model and number of tokens.

    Args:
        model_name: Name of the model
        num_tokens: Number of tokens.
        is_completion: Whether the model is used for completion or not.
            Defaults to False.

    Returns:
        Cost in USD.

    Raises:
        ValueError: if the (standardized) model name is unknown.
    """
    model_name = standardize_model_name(model_name, is_completion=is_completion)
    if model_name not in MODEL_COST_PER_1K_TOKENS:
        # BUG FIX: the original message ran "name.Known" together with no
        # space, producing a garbled user-facing error.
        raise ValueError(
            f'Unknown model: {model_name}. Please provide a valid OpenAI model name. Known models are: '
            + ', '.join(MODEL_COST_PER_1K_TOKENS.keys()))
    # Cost table is per 1K tokens.
    return MODEL_COST_PER_1K_TOKENS[model_name] * (num_tokens / 1000)
def resize_base64_image(base64_string: str, size: tuple = (128, 128)) -> str:
    """
    Resize an image encoded as a Base64 string.

    :param base64_string: A Base64 encoded string of the image to be resized.
    :param size: A tuple representing the new size (width, height) for the image.
    :return: A Base64 encoded string of the resized image.
    """
    img_data = base64.b64decode(base64_string)
    img = Image.open(io.BytesIO(img_data))
    # LANCZOS gives high-quality downscaling.
    resized_img = img.resize(size, Image.LANCZOS)
    buffered = io.BytesIO()
    # NOTE(review): re-saves in the source image's own format; ``img.format``
    # can be None for images not read from a recognised file — confirm inputs
    # are always decoded files.
    resized_img.save(buffered, format=img.format)
    return base64.b64encode(buffered.getvalue()).decode('utf-8')
def append_to_last_tokens(self, token: str) -> None:
    """Push a token onto the raw and stripped sliding windows, trimming the
    oldest entry once the windows exceed the answer-prefix length."""
    self.last_tokens.append(token)
    self.last_tokens_stripped.append(token.strip())
    # Keep both windows no longer than the prefix being matched.
    if len(self.last_tokens) > len(self.answer_prefix_tokens):
        del self.last_tokens[0]
        del self.last_tokens_stripped[0]
def find_files(path):
    """Find all MDX files in the given path"""
    # A direct file path is yielded as-is (no extension check, matching the
    # original behaviour for explicit paths).
    if os.path.isfile(path):
        yield path
        return
    for root, _, filenames in os.walk(path):
        for name in filenames:
            if name.endswith(('.mdx', '.md')):
                yield os.path.join(root, name)
def test_chat_ernie_bot_with_temperature() -> None:
    """Smoke test ErnieBotChat with an explicit temperature setting."""
    chat = ErnieBotChat(model_name='ERNIE-Bot', temperature=1.0)
    message = HumanMessage(content='Hello')
    response = chat([message])
    assert isinstance(response, AIMessage)
    assert isinstance(response.content, str)
@classmethod
def from_texts(cls, texts: List[str], embedding: Embeddings, metadatas:
        Optional[List[dict]] = None, collection_name: str = 'LangChainCollection',
        connection_args: dict[str, Any] = DEFAULT_MILVUS_CONNECTION,
        consistency_level: str = 'Session', index_params: Optional[dict] = None,
        search_params: Optional[dict] = None, drop_old: bool = False,
        **kwargs: Any) -> Milvus:
    """Create a Milvus collection, indexes it with HNSW, and insert data.

    Args:
        texts (List[str]): Text data.
        embedding (Embeddings): Embedding function.
        metadatas (Optional[List[dict]]): Metadata for each text if it exists.
            Defaults to None.
        collection_name (str, optional): Collection name to use. Defaults to
            "LangChainCollection".
        connection_args (dict[str, Any], optional): Connection args to use.
            Defaults to DEFAULT_MILVUS_CONNECTION.
        consistency_level (str, optional): Which consistency level to use.
            Defaults to "Session".
        index_params (Optional[dict], optional): Which index_params to use.
            Defaults to None.
        search_params (Optional[dict], optional): Which search params to use.
            Defaults to None.
        drop_old (Optional[bool], optional): Whether to drop the collection with
            that name if it exists. Defaults to False.

    Returns:
        Milvus: Milvus Vector Store
    """
    # Build the store first, then bulk-insert the texts (embedding happens
    # inside add_texts).
    vector_db = cls(embedding_function=embedding, collection_name=
        collection_name, connection_args=connection_args, consistency_level=
        consistency_level, index_params=index_params, search_params=
        search_params, drop_old=drop_old, **kwargs)
    vector_db.add_texts(texts=texts, metadatas=metadatas)
    return vector_db
def fakepost(**kwargs: Any) -> Any:
    """Build a fake ``post`` callable that stubs the processing endpoints."""

    def fn(url: str, **kwargs: Any) -> Any:
        if url.endswith('/processing/upload'):
            return FakeUploadResponse()
        if url.endswith('/processing/push'):
            return FakePushResponse()
        raise Exception('Invalid POST URL')

    return fn
def _get_relevant_documents(self, query: str, *, run_manager:
        CallbackManagerForRetrieverRun) -> List[Document]:
    """Get documents relevant for a query."""
    from google.api_core.exceptions import InvalidArgument
    search_request = self._create_search_request(query)
    try:
        response = self._client.search(search_request)
    except InvalidArgument as exc:
        # BUG FIX: chain the original exception so the real traceback is not
        # lost when re-raising with the augmented message.
        raise type(exc)(exc.message +
            ' This might be due to engine_data_type not set correctly.'
            ) from exc
    # Each engine data type uses a different response-to-Document converter.
    if self.engine_data_type == 0:
        chunk_type = ('extractive_answers' if self.get_extractive_answers else
            'extractive_segments')
        documents = self._convert_unstructured_search_response(response.
            results, chunk_type)
    elif self.engine_data_type == 1:
        documents = self._convert_structured_search_response(response.results)
    elif self.engine_data_type == 2:
        chunk_type = ('extractive_answers' if self.get_extractive_answers else
            'snippets')
        documents = self._convert_website_search_response(response.results,
            chunk_type)
    else:
        # BUG FIX: added the missing space after the comma in the message.
        raise NotImplementedError(
            'Only data store type 0 (Unstructured), 1 (Structured), or 2 (Website) are supported currently.'
            + f' Got {self.engine_data_type}')
    return documents
@property
def output_keys(self) -> List[str]:
    """Return the output keys.

    :meta private:
    """
    return [self.output_key]
def _download_from_gcs(self, gcs_location: str) -> str:
    """Downloads from GCS in text format.

    Args:
        gcs_location: The location where the file is located.

    Returns:
        The string contents of the file.
    """
    bucket = self.gcs_client.get_bucket(self.gcs_bucket_name)
    blob = bucket.blob(gcs_location)
    # NOTE(review): google-cloud-storage's download_as_string returns
    # ``bytes`` despite this method's ``str`` annotation — confirm callers
    # handle/decode the bytes.
    return blob.download_as_string()
@pytest.mark.compile
def test_placeholder() -> None:
    """Used for compiling integration tests without running any real tests."""
    pass
def test_memory_with_message_store() -> None:
    """Test the memory with a message store."""
    message_history = Neo4jChatMessageHistory(session_id='test-session')
    memory = ConversationBufferMemory(memory_key='baz', chat_memory=
        message_history, return_messages=True)
    memory.chat_memory.add_ai_message('This is me, the AI')
    memory.chat_memory.add_user_message('This is me, the human')
    # Round-trip through serialization to confirm both messages persisted.
    messages = memory.chat_memory.messages
    messages_json = json.dumps([message_to_dict(msg) for msg in messages])
    assert 'This is me, the AI' in messages_json
    assert 'This is me, the human' in messages_json
    # clear() must empty the backing store.
    memory.chat_memory.clear()
    assert memory.chat_memory.messages == []
def _import_arcee() -> Any:
    # Lazy import to avoid loading langchain_community at module import time.
    from langchain_community.llms.arcee import Arcee
    return Arcee
test_pairwise_string_comparison_chain
llm = FakeLLM(queries={'a': 'This is a rather good answer. Rating: [[9]]', 'b': 'This is a rather bad answer. Rating: [[1]]'}, sequential_responses=True) chain = ScoreStringEvalChain.from_llm(llm=llm) res = chain.evaluate_strings(prediction='I like pie.', input= 'What is your favorite food?') assert res['score'] == 9 assert res['reasoning'] == 'This is a rather good answer. Rating: [[9]]' with pytest.warns(UserWarning, match=re.escape(chain._skip_reference_warning)): res = chain.evaluate_strings(prediction='I like pie.', input= 'What is your favorite food?', reference='I enjoy pie.') assert res['score'] == 1 assert res['reasoning'] == 'This is a rather bad answer. Rating: [[1]]'
def test_pairwise_string_comparison_chain() ->None:
    """The score chain parses ratings and warns on an unused reference."""
    fake_llm = FakeLLM(queries={'a':
        'This is a rather good answer. Rating: [[9]]', 'b':
        'This is a rather bad answer. Rating: [[1]]'},
        sequential_responses=True)
    evaluator = ScoreStringEvalChain.from_llm(llm=fake_llm)
    first = evaluator.evaluate_strings(prediction='I like pie.', input=
        'What is your favorite food?')
    assert first['score'] == 9
    assert first['reasoning'] == 'This is a rather good answer. Rating: [[9]]'
    # Supplying a reference to a reference-free chain should emit a warning.
    with pytest.warns(UserWarning, match=re.escape(evaluator.
            _skip_reference_warning)):
        second = evaluator.evaluate_strings(prediction='I like pie.', input
            ='What is your favorite food?', reference='I enjoy pie.')
    assert second['score'] == 1
    assert second['reasoning'] == 'This is a rather bad answer. Rating: [[1]]'
null
_import_timescalevector
from langchain_community.vectorstores.timescalevector import TimescaleVector return TimescaleVector
def _import_timescalevector() ->Any:
    """Import and return the ``TimescaleVector`` store class lazily.

    Deferring the import avoids a hard dependency at module load time.
    """
    from langchain_community.vectorstores.timescalevector import TimescaleVector
    return TimescaleVector
null
_stream
invocation_params = self._invocation_params(stop, **kwargs) for res in self.client.generate_stream(prompt, **invocation_params): stop_seq_found: Optional[str] = None for stop_seq in invocation_params['stop_sequences']: if stop_seq in res.token.text: stop_seq_found = stop_seq text: Optional[str] = None if res.token.special: text = None elif stop_seq_found: text = res.token.text[:res.token.text.index(stop_seq_found)] else: text = res.token.text if text: chunk = GenerationChunk(text=text) yield chunk if run_manager: run_manager.on_llm_new_token(chunk.text) if stop_seq_found: break
def _stream(self, prompt: str, stop: Optional[List[str]]=None, run_manager:
    Optional[CallbackManagerForLLMRun]=None, **kwargs: Any) ->Iterator[
    GenerationChunk]:
    """Stream generated text chunks, truncating at the first stop sequence.

    Args:
        prompt: The prompt to send to the model.
        stop: Optional stop sequences, merged into the invocation params.
        run_manager: Optional callback manager notified of each new token.

    Yields:
        GenerationChunk: One chunk per non-special token, with text at or
        after a detected stop sequence removed.
    """
    invocation_params = self._invocation_params(stop, **kwargs)
    for res in self.client.generate_stream(prompt, **invocation_params):
        # Remember which stop sequence (if any) appears in this token.
        stop_seq_found: Optional[str] = None
        for stop_seq in invocation_params['stop_sequences']:
            if stop_seq in res.token.text:
                stop_seq_found = stop_seq
        # Special tokens (e.g. end-of-text markers) are never emitted.
        text: Optional[str] = None
        if res.token.special:
            text = None
        elif stop_seq_found:
            # Keep only the text that precedes the stop sequence.
            text = res.token.text[:res.token.text.index(stop_seq_found)]
        else:
            text = res.token.text
        if text:
            chunk = GenerationChunk(text=text)
            yield chunk
            if run_manager:
                run_manager.on_llm_new_token(chunk.text)
        # Stop streaming entirely once any stop sequence has been seen.
        if stop_seq_found:
            break
null
input_keys
"""Get the input keys for the chain. Returns: List[str]: The input keys. """ return ['question', 'agent_trajectory', 'answer', 'reference']
@property
def input_keys(self) ->List[str]:
    """Names of the inputs this chain expects.

    Returns:
        List[str]: The input keys.
    """
    keys = ['question', 'agent_trajectory', 'answer', 'reference']
    return keys
Get the input keys for the chain. Returns: List[str]: The input keys.
mdelete
"""Delete the given keys and their associated values. Args: keys (Sequence[str]): A sequence of keys to delete. Returns: None """ for key in keys: full_path = self._get_full_path(key) if full_path.exists(): full_path.unlink()
def mdelete(self, keys: Sequence[str]) ->None:
    """Delete the given keys and their associated values.

    Keys that do not exist on disk are ignored silently.

    Args:
        keys (Sequence[str]): A sequence of keys to delete.

    Returns:
        None
    """
    for key in keys:
        path = self._get_full_path(key)
        if path.exists():
            path.unlink()
Delete the given keys and their associated values. Args: keys (Sequence[str]): A sequence of keys to delete. Returns: None
test_file_toolkit_root_dir
"""Test the FileManagementToolkit root_dir handling.""" with TemporaryDirectory() as temp_dir: toolkit = FileManagementToolkit(root_dir=temp_dir) tools = toolkit.get_tools() root_dirs = [tool.root_dir for tool in tools if hasattr(tool, 'root_dir')] assert all(root_dir == temp_dir for root_dir in root_dirs)
def test_file_toolkit_root_dir() ->None:
    """Test the FileManagementToolkit root_dir handling."""
    with TemporaryDirectory() as temp_dir:
        tools = FileManagementToolkit(root_dir=temp_dir).get_tools()
        # Every tool exposing a root_dir must point at the toolkit's root.
        assert all(tool.root_dir == temp_dir for tool in tools if hasattr(
            tool, 'root_dir'))
Test the FileManagementToolkit root_dir handling.
_identifying_params
"""Get the identifying parameters.""" return {**{'model_name': self.model_name}, **super()._identifying_params}
@property
def _identifying_params(self) ->Mapping[str, Any]:
    """Get the identifying parameters."""
    # Parent params take precedence over the local model name, matching the
    # original merge order.
    params = {'model_name': self.model_name}
    params.update(super()._identifying_params)
    return params
Get the identifying parameters.
format_duplicated_operator
"""Format the operator name with the count""" clean_operator_name = re.sub('[<>]', '', operator_name) clean_operator_name = re.sub('_\\d+$', '', clean_operator_name) if operator_name.startswith('<') and operator_name.endswith('>'): return f'<{clean_operator_name}_{count}>' else: return f'{clean_operator_name}_{count}'
def format_duplicated_operator(operator_name: str, count: int) ->str:
    """Format the operator name with the count"""
    # Preserve the <...> bracketing style of the input, if any.
    bracketed = operator_name.startswith('<') and operator_name.endswith('>')
    # Strip brackets and any previous numeric suffix before re-numbering.
    base = re.sub('_\\d+$', '', re.sub('[<>]', '', operator_name))
    renumbered = f'{base}_{count}'
    return f'<{renumbered}>' if bracketed else renumbered
Format the operator name with the count
_convert_dict_to_message
role = _dict['role'] content = _dict['content'] if role == 'user': return HumanMessage(content=content) elif role == 'assistant': return AIMessage(content=content) elif role == 'system': return SystemMessage(content=content) else: return ChatMessage(content=content, role=role)
@staticmethod
def _convert_dict_to_message(_dict: Mapping[str, Any]) ->BaseMessage:
    """Map a role-tagged dict onto the matching message class."""
    role = _dict['role']
    content = _dict['content']
    if role == 'user':
        return HumanMessage(content=content)
    if role == 'assistant':
        return AIMessage(content=content)
    if role == 'system':
        return SystemMessage(content=content)
    # Unknown roles fall back to a generic ChatMessage carrying the role.
    return ChatMessage(content=content, role=role)
null
lazy_load
return self._integration._load_data(stream_name=self._stream_name, state= self._state)
def lazy_load(self) ->Iterator[Document]:
    """Lazily stream documents from the underlying integration."""
    integration = self._integration
    return integration._load_data(stream_name=self._stream_name, state=
        self._state)
null
input_keys
"""Return the input keys of the chain. Returns: List[str]: The input keys. """ return ['prediction', 'prediction_b']
@property
def input_keys(self) ->List[str]:
    """Return the input keys of the chain.

    Returns:
        List[str]: The input keys.
    """
    keys = ['prediction', 'prediction_b']
    return keys
Return the input keys of the chain. Returns: List[str]: The input keys.
on_retriever_error
self.on_retriever_error_common()
def on_retriever_error(self, *args: Any, **kwargs: Any) ->Any:
    """Delegate retriever errors to the shared error-handling hook."""
    # Positional/keyword arguments are accepted but intentionally unused.
    self.on_retriever_error_common()
null
max_marginal_relevance_search
"""Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: query (str): Text to look up documents similar to. k (int): Number of Documents to return. Defaults to 4. fetch_k (int): Number of Documents to fetch to pass to MMR algorithm. Defaults to 20. lambda_mult (float): Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. Returns: List[Document]: List of Documents selected by maximal marginal relevance. """ embedding = self.embedding_function.embed_query(query) return self.max_marginal_relevance_search_by_vector(embedding, k=k, fetch_k =fetch_k, lambda_mult=lambda_mult, filter=filter, **kwargs)
def max_marginal_relevance_search(self, query: str, k: int=4, fetch_k: int=
    20, lambda_mult: float=0.5, filter: Optional[Dict[str, str]]=None, **
    kwargs: Any) ->List[Document]:
    """Return docs selected using the maximal marginal relevance.

    Maximal marginal relevance optimizes for similarity to the query AND
    diversity among the selected documents.

    Args:
        query (str): Text to look up documents similar to.
        k (int): Number of Documents to return. Defaults to 4.
        fetch_k (int): Number of Documents to fetch to pass to the MMR
            algorithm. Defaults to 20.
        lambda_mult (float): Number between 0 and 1 that determines the
            degree of diversity among the results, with 0 corresponding to
            maximum diversity and 1 to minimum diversity. Defaults to 0.5.
        filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to
            None.

    Returns:
        List[Document]: Documents selected by maximal marginal relevance.
    """
    # Embed the query once, then delegate to the vector-based variant.
    query_embedding = self.embedding_function.embed_query(query)
    return self.max_marginal_relevance_search_by_vector(query_embedding, k
        =k, fetch_k=fetch_k, lambda_mult=lambda_mult, filter=filter, **kwargs)
Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: query (str): Text to look up documents similar to. k (int): Number of Documents to return. Defaults to 4. fetch_k (int): Number of Documents to fetch to pass to MMR algorithm. Defaults to 20. lambda_mult (float): Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. Returns: List[Document]: List of Documents selected by maximal marginal relevance.
on_agent_finish
"""Do nothing.""" pass
def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) ->None:
    """Intentionally a no-op: agent-finish events are ignored."""
Do nothing.
get_schema
return db.get_table_info()
def get_schema(_):
    """Return the database's table info; the single argument is ignored."""
    schema = db.get_table_info()
    return schema
null
_drop_collection
""" Drop the collection from storage. This is meant as an internal-usage method, no members are set other than actual deletion on the backend. """ _ = self.astra_db.delete_collection(collection_name=self.collection_name) return None
def _drop_collection(self) ->None: """ Drop the collection from storage. This is meant as an internal-usage method, no members are set other than actual deletion on the backend. """ _ = self.astra_db.delete_collection(collection_name=self.collection_name) return None
Drop the collection from storage. This is meant as an internal-usage method, no members are set other than actual deletion on the backend.
_construct_scratchpad
"""Construct the scratchpad that lets the agent continue its thought process.""" thoughts: List[BaseMessage] = [] for action, observation in intermediate_steps: thoughts.append(AIMessage(content=action.log)) human_message = HumanMessage(content=self.template_tool_response.format (observation=observation)) thoughts.append(human_message) return thoughts
def _construct_scratchpad(self, intermediate_steps: List[Tuple[AgentAction,
    str]]) ->List[BaseMessage]:
    """Construct the scratchpad that lets the agent continue its thought process."""
    thoughts: List[BaseMessage] = []
    for action, observation in intermediate_steps:
        # Replay the agent's reasoning, then the tool's observation as a
        # templated human turn.
        tool_response = self.template_tool_response.format(observation=
            observation)
        thoughts.extend([AIMessage(content=action.log), HumanMessage(
            content=tool_response)])
    return thoughts
Construct the scratchpad that lets the agent continue its thought process.
on_llm_error
if parent_run_id is None: self.increment()
def on_llm_error(self, error: BaseException, *, run_id: UUID, parent_run_id:
    Optional[UUID]=None, **kwargs: Any) ->Any:
    """Count only top-level LLM errors (those without a parent run)."""
    if parent_run_id is not None:
        return
    self.increment()
null
details_of_games
games = self.steam.apps.search_games(name) info_partOne_dict = self.get_id_link_price(games) info_partOne = self.parse_to_str(info_partOne_dict) id = str(info_partOne_dict.get('id')) info_dict = self.steam.apps.get_app_details(id) data = info_dict.get(id).get('data') detailed_description = data.get('detailed_description') detailed_description = self.remove_html_tags(detailed_description) supported_languages = info_dict.get(id).get('data').get('supported_languages') info_partTwo = ('The summary of the game is: ' + detailed_description + '\n' + 'The supported languages of the game are: ' + supported_languages + '\n') info = info_partOne + info_partTwo return info
def details_of_games(self, name: str) ->str:
    """Return id/link/price info plus summary and languages for a game.

    Args:
        name: Free-text game name to search for on Steam.

    Returns:
        Human-readable details: the id/link/price block followed by the
        game's HTML-stripped description and its supported languages.
    """
    games = self.steam.apps.search_games(name)
    info_partOne_dict = self.get_id_link_price(games)
    info_partOne = self.parse_to_str(info_partOne_dict)
    # Renamed from `id` to avoid shadowing the builtin.
    app_id = str(info_partOne_dict.get('id'))
    info_dict = self.steam.apps.get_app_details(app_id)
    data = info_dict.get(app_id).get('data')
    detailed_description = self.remove_html_tags(data.get(
        'detailed_description'))
    # Reuse the already-fetched payload instead of a second deep lookup.
    supported_languages = data.get('supported_languages')
    info_partTwo = ('The summary of the game is: ' + detailed_description +
        '\n' + 'The supported languages of the game are: ' +
        supported_languages + '\n')
    return info_partOne + info_partTwo
null
similarity_search_with_score
"""Run similarity search with score. Args: query: search query text. k: Number of Documents to return. Defaults to 4. filter: Filter on metadata properties, e.g. { "str_property": "foo", "int_property": 123 } brute_force: Whether to use brute force search. Defaults to False. fraction_lists_to_search: Optional percentage of lists to search, must be in range 0.0 and 1.0, exclusive. If Node, uses service's default which is 0.05. Returns: List of Documents most similar to the query vector, with similarity scores. """ emb = self.embedding_model.embed_query(query) return self.similarity_search_with_score_by_vector(emb, k, filter, brute_force, fraction_lists_to_search, **kwargs)
def similarity_search_with_score(self, query: str, k: int=DEFAULT_TOP_K,
    filter: Optional[Dict[str, Any]]=None, brute_force: bool=False,
    fraction_lists_to_search: Optional[float]=None, **kwargs: Any) ->List[
    Tuple[Document, float]]:
    """Run similarity search with score.

    Args:
        query: Search query text.
        k: Number of Documents to return. Defaults to 4.
        filter: Filter on metadata properties, e.g.
            {
                "str_property": "foo",
                "int_property": 123
            }
        brute_force: Whether to use brute force search. Defaults to False.
        fraction_lists_to_search: Optional percentage of lists to search,
            must be in range 0.0 and 1.0, exclusive. If None, uses the
            service's default, which is 0.05.

    Returns:
        List of Documents most similar to the query vector, with
        similarity scores.
    """
    # Embed once, then delegate to the vector-based variant.
    query_embedding = self.embedding_model.embed_query(query)
    return self.similarity_search_with_score_by_vector(query_embedding, k,
        filter, brute_force, fraction_lists_to_search, **kwargs)
Run similarity search with score. Args: query: search query text. k: Number of Documents to return. Defaults to 4. filter: Filter on metadata properties, e.g. { "str_property": "foo", "int_property": 123 } brute_force: Whether to use brute force search. Defaults to False. fraction_lists_to_search: Optional percentage of lists to search, must be in range 0.0 and 1.0, exclusive. If Node, uses service's default which is 0.05. Returns: List of Documents most similar to the query vector, with similarity scores.
yield_blobs
"""Yield blobs that match the requested pattern.""" iterator = _make_iterator(length_func=self.count_matching_files, show_progress=self.show_progress) for path in iterator(self._yield_paths()): yield Blob.from_path(path)
def yield_blobs(self) ->Iterable[Blob]:
    """Yield blobs that match the requested pattern."""
    # The iterator wrapper optionally renders a progress bar sized by the
    # number of matching files.
    wrap = _make_iterator(length_func=self.count_matching_files,
        show_progress=self.show_progress)
    for path in wrap(self._yield_paths()):
        yield Blob.from_path(path)
Yield blobs that match the requested pattern.
tool_run_logging_kwargs
return {'llm_prefix': self.llm_prefix, 'observation_prefix': self. observation_prefix}
def tool_run_logging_kwargs(self) ->Dict:
    """Expose the prefixes used when logging tool runs."""
    return dict(llm_prefix=self.llm_prefix, observation_prefix=self.
        observation_prefix)
null
test_jinachat_api_key_masked_when_passed_via_constructor
"""Test initialization with an API key provided via the initializer""" llm = JinaChat(jinachat_api_key='secret-api-key') print(llm.jinachat_api_key, end='') captured = capsys.readouterr() assert captured.out == '**********'
def test_jinachat_api_key_masked_when_passed_via_constructor(capsys:
    CaptureFixture) ->None:
    """Test initialization with an API key provided via the initializer"""
    chat = JinaChat(jinachat_api_key='secret-api-key')
    print(chat.jinachat_api_key, end='')
    # The secret must be masked when rendered, never echoed in clear text.
    assert capsys.readouterr().out == '**********'
Test initialization with an API key provided via the initializer
load
"""Load documents.""" try: from google.cloud import storage except ImportError: raise ImportError( 'Could not import google-cloud-storage python package. Please install it with `pip install google-cloud-storage`.' ) storage_client = storage.Client(self.project_name, client_info= get_client_info('google-cloud-storage')) bucket = storage_client.get_bucket(self.bucket) blob = bucket.blob(self.blob) metadata = bucket.get_blob(self.blob).metadata with tempfile.TemporaryDirectory() as temp_dir: file_path = f'{temp_dir}/{self.blob}' os.makedirs(os.path.dirname(file_path), exist_ok=True) blob.download_to_filename(file_path) loader = self._loader_func(file_path) docs = loader.load() for doc in docs: if 'source' in doc.metadata: doc.metadata['source'] = f'gs://{self.bucket}/{self.blob}' if metadata: doc.metadata.update(metadata) return docs
def load(self) ->List[Document]:
    """Load documents.

    Downloads the configured blob from Google Cloud Storage into a
    temporary file, parses it with the loader factory, and tags each
    document with its ``gs://`` source URI and any blob metadata.

    Raises:
        ImportError: If ``google-cloud-storage`` is not installed.
    """
    try:
        from google.cloud import storage
    except ImportError:
        raise ImportError(
            'Could not import google-cloud-storage python package. Please install it with `pip install google-cloud-storage`.'
            )
    storage_client = storage.Client(self.project_name, client_info=
        get_client_info('google-cloud-storage'))
    bucket = storage_client.get_bucket(self.bucket)
    blob = bucket.blob(self.blob)
    # NOTE(review): the blob is fetched a second time just for metadata.
    metadata = bucket.get_blob(self.blob).metadata
    with tempfile.TemporaryDirectory() as temp_dir:
        file_path = f'{temp_dir}/{self.blob}'
        # Blob names may contain '/' separators; create the subdirectories.
        os.makedirs(os.path.dirname(file_path), exist_ok=True)
        blob.download_to_filename(file_path)
        loader = self._loader_func(file_path)
        docs = loader.load()
        for doc in docs:
            # Rewrite the local temp path back to the canonical GCS URI.
            if 'source' in doc.metadata:
                doc.metadata['source'] = f'gs://{self.bucket}/{self.blob}'
            if metadata:
                doc.metadata.update(metadata)
        return docs
Load documents.
standardize_model_name
""" Standardize the model name to a format that can be used in the OpenAI API. Args: model_name: Model name to standardize. is_completion: Whether the model is used for completion or not. Defaults to False. Returns: Standardized model name. """ model_name = model_name.lower() if '.ft-' in model_name: model_name = model_name.split('.ft-')[0] + '-azure-finetuned' if ':ft-' in model_name: model_name = model_name.split(':')[0] + '-finetuned-legacy' if 'ft:' in model_name: model_name = model_name.split(':')[1] + '-finetuned' if is_completion and (model_name.startswith('gpt-4') or model_name. startswith('gpt-3.5') or model_name.startswith('gpt-35') or 'finetuned' in model_name and 'legacy' not in model_name): return model_name + '-completion' else: return model_name
def standardize_model_name(model_name: str, is_completion: bool=False) ->str:
    """
    Standardize the model name to a format that can be used in the OpenAI API.

    Args:
        model_name: Model name to standardize.
        is_completion: Whether the model is used for completion or not.
            Defaults to False.

    Returns:
        Standardized model name.
    """
    name = model_name.lower()
    # Normalize the various fine-tune naming schemes first.
    if '.ft-' in name:
        name = name.split('.ft-')[0] + '-azure-finetuned'
    if ':ft-' in name:
        name = name.split(':')[0] + '-finetuned-legacy'
    if 'ft:' in name:
        name = name.split(':')[1] + '-finetuned'
    is_chat_family = name.startswith(('gpt-4', 'gpt-3.5', 'gpt-35'))
    is_modern_finetune = 'finetuned' in name and 'legacy' not in name
    if is_completion and (is_chat_family or is_modern_finetune):
        return name + '-completion'
    return name
Standardize the model name to a format that can be used in the OpenAI API. Args: model_name: Model name to standardize. is_completion: Whether the model is used for completion or not. Defaults to False. Returns: Standardized model name.
_construct_scratchpad
agent_scratchpad = super()._construct_scratchpad(intermediate_steps) if not isinstance(agent_scratchpad, str): raise ValueError('agent_scratchpad should be of type string.') if agent_scratchpad: return f"""This was your previous work (but I haven't seen any of it! I only see what you return as final answer): {agent_scratchpad}""" else: return agent_scratchpad
def _construct_scratchpad(self, intermediate_steps: List[Tuple[AgentAction,
    str]]) ->str:
    """Wrap the textual scratchpad with a reminder that it is hidden context."""
    agent_scratchpad = super()._construct_scratchpad(intermediate_steps)
    if not isinstance(agent_scratchpad, str):
        raise ValueError('agent_scratchpad should be of type string.')
    if not agent_scratchpad:
        return agent_scratchpad
    return (
        "This was your previous work (but I haven't seen any of it! I only see what you return as final answer):\n"
         + agent_scratchpad)
null
batch
if not inputs: return [] keys = [input['key'] for input in inputs] actual_inputs = [input['input'] for input in inputs] if any(key not in self.runnables for key in keys): raise ValueError('One or more keys do not have a corresponding runnable') def invoke(runnable: Runnable, input: Input, config: RunnableConfig) ->Union[ Output, Exception]: if return_exceptions: try: return runnable.invoke(input, config, **kwargs) except Exception as e: return e else: return runnable.invoke(input, config, **kwargs) runnables = [self.runnables[key] for key in keys] configs = get_config_list(config, len(inputs)) with get_executor_for_config(configs[0]) as executor: return cast(List[Output], list(executor.map(invoke, runnables, actual_inputs, configs)))
def batch(self, inputs: List[RouterInput], config: Optional[Union[
    RunnableConfig, List[RunnableConfig]]]=None, *, return_exceptions: bool
    =False, **kwargs: Optional[Any]) ->List[Output]:
    """Invoke the runnable selected by each input's 'key', in parallel.

    Args:
        inputs: Router inputs, each carrying a 'key' and an 'input' field.
        config: A single config shared by all calls, or one per input.
        return_exceptions: If True, per-call exceptions are returned in
            place of results instead of being raised.

    Raises:
        ValueError: If any key has no corresponding runnable.
    """
    if not inputs:
        return []
    keys = [input['key'] for input in inputs]
    actual_inputs = [input['input'] for input in inputs]
    # Validate every key up front so no work starts on a bad batch.
    if any(key not in self.runnables for key in keys):
        raise ValueError(
            'One or more keys do not have a corresponding runnable')

    def invoke(runnable: Runnable, input: Input, config: RunnableConfig
        ) ->Union[Output, Exception]:
        # Capture exceptions per-call so one failure cannot abort the batch.
        if return_exceptions:
            try:
                return runnable.invoke(input, config, **kwargs)
            except Exception as e:
                return e
        else:
            return runnable.invoke(input, config, **kwargs)
    runnables = [self.runnables[key] for key in keys]
    configs = get_config_list(config, len(inputs))
    # executor.map preserves input order, so results align with `inputs`.
    with get_executor_for_config(configs[0]) as executor:
        return cast(List[Output], list(executor.map(invoke, runnables,
            actual_inputs, configs)))
null
_run
"""Use the Sleep tool.""" sleep(sleep_time) return f'Agent slept for {sleep_time} seconds.'
def _run(self, sleep_time: int, run_manager: Optional[
    CallbackManagerForToolRun]=None) ->str:
    """Use the Sleep tool."""
    sleep(sleep_time)
    message = f'Agent slept for {sleep_time} seconds.'
    return message
Use the Sleep tool.
test_all_imports
assert set(__all__) == set(EXPECTED_ALL)
def test_all_imports() ->None:
    """The public export list must match the expected set exactly."""
    assert set(EXPECTED_ALL) == set(__all__)
null
_call_before_predict
context, actions = base.get_based_on_and_to_select_from(inputs=inputs) if not actions: raise ValueError( "No variables using 'ToSelectFrom' found in the inputs. Please include at least one variable containing a list to select from." ) if len(list(actions.values())) > 1: raise ValueError( "Only one variable using 'ToSelectFrom' can be provided in the inputs for the PickBest chain. Please provide only one variable containing a list to select from." ) if not context: raise ValueError( "No variables using 'BasedOn' found in the inputs. Please include at least one variable containing information to base the selected of ToSelectFrom on." ) event = PickBestEvent(inputs=inputs, to_select_from=actions, based_on=context) return event
def _call_before_predict(self, inputs: Dict[str, Any]) ->PickBestEvent:
    """Validate the inputs and wrap them in a PickBestEvent.

    Requires exactly one 'ToSelectFrom' variable and at least one
    'BasedOn' variable.
    """
    context, actions = base.get_based_on_and_to_select_from(inputs=inputs)
    if not actions:
        raise ValueError(
            "No variables using 'ToSelectFrom' found in the inputs. Please include at least one variable containing a list to select from."
            )
    if len(list(actions.values())) > 1:
        raise ValueError(
            "Only one variable using 'ToSelectFrom' can be provided in the inputs for the PickBest chain. Please provide only one variable containing a list to select from."
            )
    if not context:
        raise ValueError(
            "No variables using 'BasedOn' found in the inputs. Please include at least one variable containing information to base the selected of ToSelectFrom on."
            )
    return PickBestEvent(inputs=inputs, to_select_from=actions, based_on=
        context)
null
json
return {'uuid': 'fake_uuid'}
def json(self) ->Any:
    """Return a canned JSON payload (test double)."""
    return dict(uuid='fake_uuid')
null
test_loading_from_JSON
"""Test loading from json file.""" prompt = load_prompt(EXAMPLE_DIR / 'simple_prompt.json') expected_prompt = PromptTemplate(input_variables=['adjective', 'content'], template='Tell me a {adjective} joke about {content}.') assert prompt == expected_prompt
def test_loading_from_JSON() ->None:
    """Test loading from json file."""
    expected = PromptTemplate(input_variables=['adjective', 'content'],
        template='Tell me a {adjective} joke about {content}.')
    assert load_prompt(EXAMPLE_DIR / 'simple_prompt.json') == expected
Test loading from json file.
_stream
res = self._chat(messages, **kwargs) default_chunk_class = AIMessageChunk for chunk in res.iter_lines(): response = json.loads(chunk) if 'error' in response: raise ValueError(f'Error from Hunyuan api response: {response}') for choice in response['choices']: chunk = _convert_delta_to_message_chunk(choice['delta'], default_chunk_class) default_chunk_class = chunk.__class__ yield ChatGenerationChunk(message=chunk) if run_manager: run_manager.on_llm_new_token(chunk.content)
def _stream(self, messages: List[BaseMessage], stop: Optional[List[str]]=
    None, run_manager: Optional[CallbackManagerForLLMRun]=None, **kwargs: Any
    ) ->Iterator[ChatGenerationChunk]:
    """Stream chat generation chunks from the Hunyuan API.

    Each streamed line is parsed as JSON and its choice deltas are
    converted to message chunks and yielded as they arrive.

    Raises:
        ValueError: If the API response reports an error.
    """
    res = self._chat(messages, **kwargs)
    default_chunk_class = AIMessageChunk
    for chunk in res.iter_lines():
        response = json.loads(chunk)
        if 'error' in response:
            raise ValueError(f'Error from Hunyuan api response: {response}')
        for choice in response['choices']:
            chunk = _convert_delta_to_message_chunk(choice['delta'],
                default_chunk_class)
            # Subsequent deltas reuse the class of the chunk just produced.
            default_chunk_class = chunk.__class__
            yield ChatGenerationChunk(message=chunk)
            if run_manager:
                run_manager.on_llm_new_token(chunk.content)
null
_out_file_path
"""Return the path to the file containing the documentation.""" return HERE / f"{package_name.replace('-', '_')}_api_reference.rst"
def _out_file_path(package_name: str='langchain') ->Path:
    """Return the path to the file containing the documentation."""
    # Dashes are not valid in module names, so normalize to underscores.
    module_name = package_name.replace('-', '_')
    return HERE / f'{module_name}_api_reference.rst'
Return the path to the file containing the documentation.
test_load_no_result
docs = api_client.load( 'NORESULTCALL_NORESULTCALL_NORESULTCALL_NORESULTCALL_NORESULTCALL_NORESULTCALL' ) assert not docs
def test_load_no_result(api_client: WikipediaAPIWrapper) ->None:
    """A nonsense query should yield no documents."""
    results = api_client.load(
        'NORESULTCALL_NORESULTCALL_NORESULTCALL_NORESULTCALL_NORESULTCALL_NORESULTCALL'
        )
    assert not results
null
on_retriever_error_common
self.errors += 1 self.retriever_errors += 1
def on_retriever_error_common(self) ->None:
    """Bump both the total and the retriever-specific error counters."""
    self.errors = self.errors + 1
    self.retriever_errors = self.retriever_errors + 1
null
test_extract_sub_links
html = ( '<a href="https://foobar.com">one</a><a href="http://baz.net">two</a><a href="//foobar.com/hello">three</a><a href="/how/are/you/doing">four</a>' ) expected = sorted(['https://foobar.com', 'https://foobar.com/hello', 'https://foobar.com/how/are/you/doing']) actual = sorted(extract_sub_links(html, 'https://foobar.com')) assert actual == expected actual = extract_sub_links(html, 'https://foobar.com/hello') expected = ['https://foobar.com/hello'] assert actual == expected actual = sorted(extract_sub_links(html, 'https://foobar.com/hello', prevent_outside=False)) expected = sorted(['https://foobar.com', 'http://baz.net', 'https://foobar.com/hello', 'https://foobar.com/how/are/you/doing']) assert actual == expected
def test_extract_sub_links() ->None:
    """Relative and protocol-relative links resolve against the base URL."""
    html = (
        '<a href="https://foobar.com">one</a><a href="http://baz.net">two</a><a href="//foobar.com/hello">three</a><a href="/how/are/you/doing">four</a>'
        )
    same_site = sorted(['https://foobar.com', 'https://foobar.com/hello',
        'https://foobar.com/how/are/you/doing'])
    assert sorted(extract_sub_links(html, 'https://foobar.com')) == same_site
    # With a deeper base URL, only links under that prefix survive.
    assert extract_sub_links(html, 'https://foobar.com/hello') == [
        'https://foobar.com/hello']
    everything = sorted(['https://foobar.com', 'http://baz.net',
        'https://foobar.com/hello', 'https://foobar.com/how/are/you/doing'])
    assert sorted(extract_sub_links(html, 'https://foobar.com/hello',
        prevent_outside=False)) == everything
null
get_lc_namespace
"""Get the namespace of the langchain object.""" return ['langchain', 'schema', 'runnable']
@classmethod
def get_lc_namespace(cls) ->List[str]:
    """Get the namespace of the langchain object."""
    namespace = ['langchain', 'schema', 'runnable']
    return namespace
Get the namespace of the langchain object.
test_multiple_history
"""Tests multiple history works.""" chat = ChatTongyi() response = chat(messages=[HumanMessage(content='Hello.'), AIMessage(content ='Hello!'), HumanMessage(content='How are you doing?')]) assert isinstance(response, BaseMessage) assert isinstance(response.content, str)
def test_multiple_history() ->None:
    """Tests multiple history works."""
    chat = ChatTongyi()
    result = chat(messages=[HumanMessage(content='Hello.'), AIMessage(
        content='Hello!'), HumanMessage(content='How are you doing?')])
    assert isinstance(result, BaseMessage)
    assert isinstance(result.content, str)
Tests multiple history works.
transform
yield from self.bound.transform(input, self._merge_configs(config), **{** self.kwargs, **kwargs})
def transform(self, input: Iterator[Input], config: Optional[RunnableConfig
    ]=None, **kwargs: Any) ->Iterator[Output]:
    """Delegate to the bound runnable with merged config and kwargs."""
    # Call-time kwargs override the kwargs captured at bind time.
    merged_kwargs = {**self.kwargs, **kwargs}
    yield from self.bound.transform(input, self._merge_configs(config), **
        merged_kwargs)
null
_alert_unsupported_spec
"""Alert if the spec is not supported.""" warning_message = (' This may result in degraded performance.' + ' Convert your OpenAPI spec to 3.1.* spec' + ' for better support.') swagger_version = obj.get('swagger') openapi_version = obj.get('openapi') if isinstance(openapi_version, str): if openapi_version != '3.1.0': logger.warning( f'Attempting to load an OpenAPI {openapi_version} spec. {warning_message}' ) else: pass elif isinstance(swagger_version, str): logger.warning( f'Attempting to load a Swagger {swagger_version} spec. {warning_message}' ) else: raise ValueError( f'Attempting to load an unsupported spec:\n\n{obj}\n{warning_message}')
@staticmethod def _alert_unsupported_spec(obj: dict) ->None: """Alert if the spec is not supported.""" warning_message = (' This may result in degraded performance.' + ' Convert your OpenAPI spec to 3.1.* spec' + ' for better support.') swagger_version = obj.get('swagger') openapi_version = obj.get('openapi') if isinstance(openapi_version, str): if openapi_version != '3.1.0': logger.warning( f'Attempting to load an OpenAPI {openapi_version} spec. {warning_message}' ) else: pass elif isinstance(swagger_version, str): logger.warning( f'Attempting to load a Swagger {swagger_version} spec. {warning_message}' ) else: raise ValueError( f'Attempting to load an unsupported spec:\n\n{obj}\n{warning_message}' )
Alert if the spec is not supported.
ignore_chain
"""Whether to ignore chain callbacks.""" return False
@property
def ignore_chain(self) ->bool:
    """Whether to ignore chain callbacks."""
    # This handler always processes chain callbacks.
    should_ignore = False
    return should_ignore
Whether to ignore chain callbacks.
test_qdrant_add_texts_stores_embeddings_as_named_vectors
"""Test end to end Qdrant.add_texts stores named vectors if name is provided.""" from qdrant_client import QdrantClient from qdrant_client.http import models as rest collection_name = uuid.uuid4().hex client = QdrantClient(':memory:') client.recreate_collection(collection_name, vectors_config={vector_name: rest.VectorParams(size=10, distance=rest.Distance.COSINE)}) vec_store = Qdrant(client, collection_name, ConsistentFakeEmbeddings(), vector_name=vector_name) vec_store.add_texts(['lorem', 'ipsum', 'dolor', 'sit', 'amet']) assert 5 == client.count(collection_name).count assert all(vector_name in point.vector for point in client.scroll( collection_name, with_vectors=True)[0])
@pytest.mark.parametrize('vector_name', ['custom-vector']) def test_qdrant_add_texts_stores_embeddings_as_named_vectors(vector_name: str ) ->None: """Test end to end Qdrant.add_texts stores named vectors if name is provided.""" from qdrant_client import QdrantClient from qdrant_client.http import models as rest collection_name = uuid.uuid4().hex client = QdrantClient(':memory:') client.recreate_collection(collection_name, vectors_config={vector_name: rest.VectorParams(size=10, distance=rest.Distance.COSINE)}) vec_store = Qdrant(client, collection_name, ConsistentFakeEmbeddings(), vector_name=vector_name) vec_store.add_texts(['lorem', 'ipsum', 'dolor', 'sit', 'amet']) assert 5 == client.count(collection_name).count assert all(vector_name in point.vector for point in client.scroll( collection_name, with_vectors=True)[0])
Test end to end Qdrant.add_texts stores named vectors if name is provided.
_stream
params = self._prepare_params(stop=stop, stream=True, **kwargs) if self._is_gemini_model: history_gemini = _parse_chat_history_gemini(messages, project=self.project) message = history_gemini.pop() chat = self.client.start_chat(history=history_gemini) responses = chat.send_message(message, stream=True, generation_config= params) else: question = _get_question(messages) history = _parse_chat_history(messages[:-1]) examples = kwargs.get('examples', None) if examples: params['examples'] = _parse_examples(examples) chat = self._start_chat(history, **params) responses = chat.send_message_streaming(question.content, **params) for response in responses: if run_manager: run_manager.on_llm_new_token(response.text) yield ChatGenerationChunk(message=AIMessageChunk(content=response.text))
def _stream(self, messages: List[BaseMessage], stop: Optional[List[str]]= None, run_manager: Optional[CallbackManagerForLLMRun]=None, **kwargs: Any ) ->Iterator[ChatGenerationChunk]: params = self._prepare_params(stop=stop, stream=True, **kwargs) if self._is_gemini_model: history_gemini = _parse_chat_history_gemini(messages, project=self. project) message = history_gemini.pop() chat = self.client.start_chat(history=history_gemini) responses = chat.send_message(message, stream=True, generation_config=params) else: question = _get_question(messages) history = _parse_chat_history(messages[:-1]) examples = kwargs.get('examples', None) if examples: params['examples'] = _parse_examples(examples) chat = self._start_chat(history, **params) responses = chat.send_message_streaming(question.content, **params) for response in responses: if run_manager: run_manager.on_llm_new_token(response.text) yield ChatGenerationChunk(message=AIMessageChunk(content=response.text) )
null
load
iter = self.lazy_load() if self.show_progress_bar: try: from tqdm import tqdm except ImportError as e: raise ImportError( "Package tqdm must be installed if show_progress_bar=True. Please install with 'pip install tqdm' or set show_progress_bar=False." ) from e iter = tqdm(iter) return list(iter)
def load(self) ->List[Document]: iter = self.lazy_load() if self.show_progress_bar: try: from tqdm import tqdm except ImportError as e: raise ImportError( "Package tqdm must be installed if show_progress_bar=True. Please install with 'pip install tqdm' or set show_progress_bar=False." ) from e iter = tqdm(iter) return list(iter)
null
_parse_front_matter
"""Parse front matter metadata from the content and return it as a dict.""" if not self.collect_metadata: return {} match = self.FRONT_MATTER_REGEX.search(content) if not match: return {} placeholders: Dict[str, str] = {} replace_template_var = functools.partial(self._replace_template_var, placeholders) front_matter_text = self.TEMPLATE_VARIABLE_REGEX.sub(replace_template_var, match.group(1)) try: front_matter = yaml.safe_load(front_matter_text) front_matter = self._restore_template_vars(front_matter, placeholders) if 'tags' in front_matter and isinstance(front_matter['tags'], str): front_matter['tags'] = front_matter['tags'].split(', ') return front_matter except yaml.parser.ParserError: logger.warning('Encountered non-yaml frontmatter') return {}
def _parse_front_matter(self, content: str) ->dict: """Parse front matter metadata from the content and return it as a dict.""" if not self.collect_metadata: return {} match = self.FRONT_MATTER_REGEX.search(content) if not match: return {} placeholders: Dict[str, str] = {} replace_template_var = functools.partial(self._replace_template_var, placeholders) front_matter_text = self.TEMPLATE_VARIABLE_REGEX.sub(replace_template_var, match.group(1)) try: front_matter = yaml.safe_load(front_matter_text) front_matter = self._restore_template_vars(front_matter, placeholders) if 'tags' in front_matter and isinstance(front_matter['tags'], str): front_matter['tags'] = front_matter['tags'].split(', ') return front_matter except yaml.parser.ParserError: logger.warning('Encountered non-yaml frontmatter') return {}
Parse front matter metadata from the content and return it as a dict.
_import_nasa
from langchain_community.utilities.nasa import NasaAPIWrapper return NasaAPIWrapper
def _import_nasa() ->Any: from langchain_community.utilities.nasa import NasaAPIWrapper return NasaAPIWrapper
null
test_nvai_play_embedding_documents
"""Test NVIDIA embeddings for documents.""" documents = ['foo bar'] embedding = NVIDIAEmbeddings(model='nvolveqa_40k') output = embedding.embed_documents(documents) assert len(output) == 1 assert len(output[0]) == 1024
def test_nvai_play_embedding_documents() ->None: """Test NVIDIA embeddings for documents.""" documents = ['foo bar'] embedding = NVIDIAEmbeddings(model='nvolveqa_40k') output = embedding.embed_documents(documents) assert len(output) == 1 assert len(output[0]) == 1024
Test NVIDIA embeddings for documents.
load
"""Load tweets.""" tweepy = _dependable_tweepy_import() api = tweepy.API(self.auth, parser=tweepy.parsers.JSONParser()) results: List[Document] = [] for username in self.twitter_users: tweets = api.user_timeline(screen_name=username, count=self.number_tweets) user = api.get_user(screen_name=username) docs = self._format_tweets(tweets, user) results.extend(docs) return results
def load(self) ->List[Document]: """Load tweets.""" tweepy = _dependable_tweepy_import() api = tweepy.API(self.auth, parser=tweepy.parsers.JSONParser()) results: List[Document] = [] for username in self.twitter_users: tweets = api.user_timeline(screen_name=username, count=self. number_tweets) user = api.get_user(screen_name=username) docs = self._format_tweets(tweets, user) results.extend(docs) return results
Load tweets.
test_list_keys
"""Test listing keys based on the provided date range.""" assert manager.list_keys() == [] with manager._make_session() as session: session.add(UpsertionRecord(key='key1', updated_at=datetime(2021, 1, 1) .timestamp(), namespace='kittens')) session.add(UpsertionRecord(key='key2', updated_at=datetime(2022, 1, 1) .timestamp(), namespace='kittens')) session.add(UpsertionRecord(key='key3', updated_at=datetime(2023, 1, 1) .timestamp(), namespace='kittens')) session.add(UpsertionRecord(key='key4', group_id='group1', updated_at= datetime(2024, 1, 1).timestamp(), namespace='kittens')) session.add(UpsertionRecord(key='key1', updated_at=datetime(2021, 1, 1) .timestamp(), namespace='puppies')) session.add(UpsertionRecord(key='key5', updated_at=datetime(2021, 1, 1) .timestamp(), namespace='puppies')) session.commit() assert manager.list_keys() == ['key1', 'key2', 'key3', 'key4'] assert manager.list_keys(after=datetime(2022, 2, 1).timestamp()) == ['key3', 'key4'] assert manager.list_keys(before=datetime(2022, 2, 1).timestamp()) == ['key1', 'key2'] assert manager.list_keys(before=datetime(2019, 2, 1).timestamp()) == [] assert manager.list_keys(before=datetime(2022, 2, 1).timestamp(), after= datetime(2021, 11, 1).timestamp()) == ['key2'] assert manager.list_keys(group_ids=['group1', 'group2']) == ['key4'] assert manager.list_keys(group_ids=['group1', 'group2'], before=datetime( 2019, 1, 1).timestamp()) == [] assert manager.list_keys(group_ids=['group1', 'group2'], after=datetime( 2019, 1, 1).timestamp()) == ['key4']
def test_list_keys(manager: SQLRecordManager) ->None: """Test listing keys based on the provided date range.""" assert manager.list_keys() == [] with manager._make_session() as session: session.add(UpsertionRecord(key='key1', updated_at=datetime(2021, 1, 1).timestamp(), namespace='kittens')) session.add(UpsertionRecord(key='key2', updated_at=datetime(2022, 1, 1).timestamp(), namespace='kittens')) session.add(UpsertionRecord(key='key3', updated_at=datetime(2023, 1, 1).timestamp(), namespace='kittens')) session.add(UpsertionRecord(key='key4', group_id='group1', updated_at=datetime(2024, 1, 1).timestamp(), namespace='kittens')) session.add(UpsertionRecord(key='key1', updated_at=datetime(2021, 1, 1).timestamp(), namespace='puppies')) session.add(UpsertionRecord(key='key5', updated_at=datetime(2021, 1, 1).timestamp(), namespace='puppies')) session.commit() assert manager.list_keys() == ['key1', 'key2', 'key3', 'key4'] assert manager.list_keys(after=datetime(2022, 2, 1).timestamp()) == ['key3' , 'key4'] assert manager.list_keys(before=datetime(2022, 2, 1).timestamp()) == [ 'key1', 'key2'] assert manager.list_keys(before=datetime(2019, 2, 1).timestamp()) == [] assert manager.list_keys(before=datetime(2022, 2, 1).timestamp(), after =datetime(2021, 11, 1).timestamp()) == ['key2'] assert manager.list_keys(group_ids=['group1', 'group2']) == ['key4'] assert manager.list_keys(group_ids=['group1', 'group2'], before= datetime(2019, 1, 1).timestamp()) == [] assert manager.list_keys(group_ids=['group1', 'group2'], after=datetime (2019, 1, 1).timestamp()) == ['key4']
Test listing keys based on the provided date range.
max_marginal_relevance_search_by_vector
"""Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. fetch_k: Number of Documents to fetch to pass to MMR algorithm. Zep determines this automatically and this parameter is ignored. lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. metadata: Optional, metadata to filter the resulting set of retrieved docs Returns: List of Documents selected by maximal marginal relevance. """ if not self._collection: raise ValueError( 'collection should be an instance of a Zep DocumentCollection') results = self._collection.search(embedding=embedding, limit=k, metadata= metadata, search_type='mmr', mmr_lambda=lambda_mult, **kwargs) return [Document(page_content=d.content, metadata=d.metadata) for d in results]
def max_marginal_relevance_search_by_vector(self, embedding: List[float], k: int=4, fetch_k: int=20, lambda_mult: float=0.5, metadata: Optional[Dict [str, Any]]=None, **kwargs: Any) ->List[Document]: """Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. fetch_k: Number of Documents to fetch to pass to MMR algorithm. Zep determines this automatically and this parameter is ignored. lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. metadata: Optional, metadata to filter the resulting set of retrieved docs Returns: List of Documents selected by maximal marginal relevance. """ if not self._collection: raise ValueError( 'collection should be an instance of a Zep DocumentCollection') results = self._collection.search(embedding=embedding, limit=k, metadata=metadata, search_type='mmr', mmr_lambda=lambda_mult, **kwargs) return [Document(page_content=d.content, metadata=d.metadata) for d in results]
Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. fetch_k: Number of Documents to fetch to pass to MMR algorithm. Zep determines this automatically and this parameter is ignored. lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. metadata: Optional, metadata to filter the resulting set of retrieved docs Returns: List of Documents selected by maximal marginal relevance.
test_pickbest_textembedder_more_namespaces_w_full_label_w_full_embed_and_keep
feature_embedder = pick_best_chain.PickBestFeatureEmbedder(auto_embed=False, model=MockEncoder()) str1 = '0' str2 = '1' str3 = '2' encoded_str1 = rl_chain.stringify_embedding(list(encoded_keyword + str1)) encoded_str2 = rl_chain.stringify_embedding(list(encoded_keyword + str2)) encoded_str3 = rl_chain.stringify_embedding(list(encoded_keyword + str3)) ctx_str_1 = 'context1' ctx_str_2 = 'context2' encoded_ctx_str_1 = rl_chain.stringify_embedding(list(encoded_keyword + ctx_str_1)) encoded_ctx_str_2 = rl_chain.stringify_embedding(list(encoded_keyword + ctx_str_2)) named_actions = {'action1': rl_chain.EmbedAndKeep([{'a': str1, 'b': str1}, str2, str3])} context = {'context1': rl_chain.EmbedAndKeep(ctx_str_1), 'context2': rl_chain.EmbedAndKeep(ctx_str_2)} expected = f"""shared |context1 {ctx_str_1 + ' ' + encoded_ctx_str_1} |context2 {ctx_str_2 + ' ' + encoded_ctx_str_2} 0:-0.0:1.0 |a {str1 + ' ' + encoded_str1} |b {str1 + ' ' + encoded_str1} |action1 {str2 + ' ' + encoded_str2} |action1 {str3 + ' ' + encoded_str3} """ selected = pick_best_chain.PickBestSelected(index=0, probability=1.0, score=0.0 ) event = pick_best_chain.PickBestEvent(inputs={}, to_select_from= named_actions, based_on=context, selected=selected) vw_ex_str = feature_embedder.format(event) assert vw_ex_str == expected
@pytest.mark.requires('vowpal_wabbit_next') def test_pickbest_textembedder_more_namespaces_w_full_label_w_full_embed_and_keep( ) ->None: feature_embedder = pick_best_chain.PickBestFeatureEmbedder(auto_embed= False, model=MockEncoder()) str1 = '0' str2 = '1' str3 = '2' encoded_str1 = rl_chain.stringify_embedding(list(encoded_keyword + str1)) encoded_str2 = rl_chain.stringify_embedding(list(encoded_keyword + str2)) encoded_str3 = rl_chain.stringify_embedding(list(encoded_keyword + str3)) ctx_str_1 = 'context1' ctx_str_2 = 'context2' encoded_ctx_str_1 = rl_chain.stringify_embedding(list(encoded_keyword + ctx_str_1)) encoded_ctx_str_2 = rl_chain.stringify_embedding(list(encoded_keyword + ctx_str_2)) named_actions = {'action1': rl_chain.EmbedAndKeep([{'a': str1, 'b': str1}, str2, str3])} context = {'context1': rl_chain.EmbedAndKeep(ctx_str_1), 'context2': rl_chain.EmbedAndKeep(ctx_str_2)} expected = f"""shared |context1 {ctx_str_1 + ' ' + encoded_ctx_str_1} |context2 {ctx_str_2 + ' ' + encoded_ctx_str_2} 0:-0.0:1.0 |a {str1 + ' ' + encoded_str1} |b {str1 + ' ' + encoded_str1} |action1 {str2 + ' ' + encoded_str2} |action1 {str3 + ' ' + encoded_str3} """ selected = pick_best_chain.PickBestSelected(index=0, probability=1.0, score=0.0) event = pick_best_chain.PickBestEvent(inputs={}, to_select_from= named_actions, based_on=context, selected=selected) vw_ex_str = feature_embedder.format(event) assert vw_ex_str == expected
null
_get_relevant_documents
"""Get documents relevant for a query.""" try: from llama_index.indices.base import BaseGPTIndex from llama_index.response.schema import Response except ImportError: raise ImportError( 'You need to install `pip install llama-index` to use this retriever.') index = cast(BaseGPTIndex, self.index) response = index.query(query, response_mode='no_text', **self.query_kwargs) response = cast(Response, response) docs = [] for source_node in response.source_nodes: metadata = source_node.extra_info or {} docs.append(Document(page_content=source_node.source_text, metadata= metadata)) return docs
def _get_relevant_documents(self, query: str, *, run_manager: CallbackManagerForRetrieverRun) ->List[Document]: """Get documents relevant for a query.""" try: from llama_index.indices.base import BaseGPTIndex from llama_index.response.schema import Response except ImportError: raise ImportError( 'You need to install `pip install llama-index` to use this retriever.' ) index = cast(BaseGPTIndex, self.index) response = index.query(query, response_mode='no_text', **self.query_kwargs) response = cast(Response, response) docs = [] for source_node in response.source_nodes: metadata = source_node.extra_info or {} docs.append(Document(page_content=source_node.source_text, metadata =metadata)) return docs
Get documents relevant for a query.
delete
self.redis_client.delete(f'{self.full_key_prefix}:{key}')
def delete(self, key: str) ->None: self.redis_client.delete(f'{self.full_key_prefix}:{key}')
null
on_tool_error
"""Run when tool errors.""" self.step += 1 self.errors += 1
def on_tool_error(self, error: BaseException, **kwargs: Any) ->None: """Run when tool errors.""" self.step += 1 self.errors += 1
Run when tool errors.
_get_metaphor_search
return MetaphorSearchResults(api_wrapper=MetaphorSearchAPIWrapper(**kwargs))
def _get_metaphor_search(**kwargs: Any) ->BaseTool: return MetaphorSearchResults(api_wrapper=MetaphorSearchAPIWrapper(**kwargs) )
null
_assert_with_duplicate_parser
"""PDFPlumber tests to verify that duplicate characters appear or not Args: parser (BaseBlobParser): The parser to test. splits_by_page (bool): Whether the parser splits by page or not by default. dedupe: Avoiding the error of duplicate characters if `dedupe=True`. """ blob = Blob.from_path(DUPLICATE_CHARS) doc_generator = parser.lazy_parse(blob) assert isinstance(doc_generator, Iterator) docs = list(doc_generator) if dedupe: assert '1000 Series' == docs[0].page_content.split('\n')[0] else: assert '11000000 SSeerriieess' == docs[0].page_content.split('\n')[0]
def _assert_with_duplicate_parser(parser: BaseBlobParser, dedupe: bool=False ) ->None: """PDFPlumber tests to verify that duplicate characters appear or not Args: parser (BaseBlobParser): The parser to test. splits_by_page (bool): Whether the parser splits by page or not by default. dedupe: Avoiding the error of duplicate characters if `dedupe=True`. """ blob = Blob.from_path(DUPLICATE_CHARS) doc_generator = parser.lazy_parse(blob) assert isinstance(doc_generator, Iterator) docs = list(doc_generator) if dedupe: assert '1000 Series' == docs[0].page_content.split('\n')[0] else: assert '11000000 SSeerriieess' == docs[0].page_content.split('\n')[0]
PDFPlumber tests to verify that duplicate characters appear or not Args: parser (BaseBlobParser): The parser to test. splits_by_page (bool): Whether the parser splits by page or not by default. dedupe: Avoiding the error of duplicate characters if `dedupe=True`.
test_appx_search_with_lucene_filter
"""Test Approximate Search with Lucene Filter.""" lucene_filter_val = {'bool': {'must': [{'term': {'text': 'bar'}}]}} docsearch = OpenSearchVectorSearch.from_texts(texts, FakeEmbeddings(), opensearch_url=DEFAULT_OPENSEARCH_URL, engine='lucene') output = docsearch.similarity_search('foo', k=3, lucene_filter= lucene_filter_val) assert output == [Document(page_content='bar')]
def test_appx_search_with_lucene_filter() ->None: """Test Approximate Search with Lucene Filter.""" lucene_filter_val = {'bool': {'must': [{'term': {'text': 'bar'}}]}} docsearch = OpenSearchVectorSearch.from_texts(texts, FakeEmbeddings(), opensearch_url=DEFAULT_OPENSEARCH_URL, engine='lucene') output = docsearch.similarity_search('foo', k=3, lucene_filter= lucene_filter_val) assert output == [Document(page_content='bar')]
Test Approximate Search with Lucene Filter.
get_lc_namespace
"""Get the namespace of the langchain object.""" return ['langchain', 'prompts', 'chat']
@classmethod def get_lc_namespace(cls) ->List[str]: """Get the namespace of the langchain object.""" return ['langchain', 'prompts', 'chat']
Get the namespace of the langchain object.
test_timescalevector_delete
"""Test deleting functionality.""" texts = ['bar', 'baz'] docs = [Document(page_content=t, metadata={'a': 'b'}) for t in texts] docsearch = TimescaleVector.from_documents(documents=docs, collection_name= 'test_collection', embedding=FakeEmbeddingsWithAdaDimension(), service_url=SERVICE_URL, pre_delete_collection=True) texts = ['foo'] meta = [{'b': 'c'}] ids = docsearch.add_texts(texts, meta) output = docsearch.similarity_search('bar', k=10) assert len(output) == 3 docsearch.delete(ids) output = docsearch.similarity_search('bar', k=10) assert len(output) == 2 docsearch.delete_by_metadata({'a': 'b'}) output = docsearch.similarity_search('bar', k=10) assert len(output) == 0
def test_timescalevector_delete() ->None: """Test deleting functionality.""" texts = ['bar', 'baz'] docs = [Document(page_content=t, metadata={'a': 'b'}) for t in texts] docsearch = TimescaleVector.from_documents(documents=docs, collection_name='test_collection', embedding= FakeEmbeddingsWithAdaDimension(), service_url=SERVICE_URL, pre_delete_collection=True) texts = ['foo'] meta = [{'b': 'c'}] ids = docsearch.add_texts(texts, meta) output = docsearch.similarity_search('bar', k=10) assert len(output) == 3 docsearch.delete(ids) output = docsearch.similarity_search('bar', k=10) assert len(output) == 2 docsearch.delete_by_metadata({'a': 'b'}) output = docsearch.similarity_search('bar', k=10) assert len(output) == 0
Test deleting functionality.